mirror of
https://github.com/noDRM/DeDRM_tools.git
synced 2026-03-25 23:08:56 +00:00
Compare commits
7 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2c95633fcd | ||
|
|
07e532f59c | ||
|
|
882edb6c69 | ||
|
|
93f02c625a | ||
|
|
e95ed1a8ed | ||
|
|
ba5927a20d | ||
|
|
297a9ddc66 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -2,9 +2,6 @@
|
|||||||
__pycache__/
|
__pycache__/
|
||||||
*.pyc
|
*.pyc
|
||||||
|
|
||||||
# C extensions
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Distribution / packaging
|
# Distribution / packaging
|
||||||
.Python
|
.Python
|
||||||
env/
|
env/
|
||||||
|
|||||||
@@ -4,36 +4,69 @@ from __future__ import with_statement
|
|||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
from calibre.gui2 import is_ok_to_use_qt
|
||||||
|
from calibre.utils.config import config_dir
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
# from calibre.ptempfile import PersistentTemporaryDirectory
|
# from calibre.ptempfile import PersistentTemporaryDirectory
|
||||||
|
|
||||||
from calibre_plugins.k4mobidedrm import kgenpids
|
|
||||||
from calibre_plugins.k4mobidedrm import topazextract
|
|
||||||
from calibre_plugins.k4mobidedrm import mobidedrm
|
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
from zipfile import ZipFile
|
||||||
|
|
||||||
class K4DeDRM(FileTypePlugin):
|
class K4DeDRM(FileTypePlugin):
|
||||||
name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
|
name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
|
||||||
description = 'Removes DRM from K4PC and Mac, Kindle Mobi and Topaz files. Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
description = 'Removes DRM from Mobipocket, Kindle/Mobi, Kindle/Topaz and Kindle/Print Replica files. Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
||||||
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
||||||
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
||||||
version = (0, 3, 1) # The version number of this plugin
|
version = (0, 4, 2) # The version number of this plugin
|
||||||
file_types = set(['prc','mobi','azw','azw1','tpz']) # The file types that this plugin will be applied to
|
file_types = set(['prc','mobi','azw','azw1','azw3','azw4','tpz']) # The file types that this plugin will be applied to
|
||||||
on_import = True # Run this plugin during the import
|
on_import = True # Run this plugin during the import
|
||||||
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
||||||
minimum_calibre_version = (0, 7, 55)
|
minimum_calibre_version = (0, 7, 55)
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
def initialize(self):
|
||||||
|
"""
|
||||||
|
Dynamic modules can't be imported/loaded from a zipfile... so this routine
|
||||||
|
runs whenever the plugin gets initialized. This will extract the appropriate
|
||||||
|
library for the target OS and copy it to the 'alfcrypto' subdirectory of
|
||||||
|
calibre's configuration directory. That 'alfcrypto' directory is then
|
||||||
|
inserted into the syspath (as the very first entry) in the run function
|
||||||
|
so the CDLL stuff will work in the alfcrypto.py script.
|
||||||
|
"""
|
||||||
|
if iswindows:
|
||||||
|
names = ['alfcrypto.dll','alfcrypto64.dll']
|
||||||
|
elif isosx:
|
||||||
|
names = ['libalfcrypto.dylib']
|
||||||
|
else:
|
||||||
|
names = ['libalfcrypto32.so','libalfcrypto64.so']
|
||||||
|
lib_dict = self.load_resources(names)
|
||||||
|
self.alfdir = os.path.join(config_dir, 'alfcrypto')
|
||||||
|
if not os.path.exists(self.alfdir):
|
||||||
|
os.mkdir(self.alfdir)
|
||||||
|
for entry, data in lib_dict.items():
|
||||||
|
file_path = os.path.join(self.alfdir, entry)
|
||||||
|
with open(file_path,'wb') as f:
|
||||||
|
f.write(data)
|
||||||
|
|
||||||
|
def run(self, path_to_ebook):
|
||||||
|
# add the alfcrypto directory to sys.path so alfcrypto.py
|
||||||
|
# will be able to locate the custom lib(s) for CDLL import.
|
||||||
|
sys.path.insert(0, self.alfdir)
|
||||||
|
# Had to move these imports here so the custom libs can be
|
||||||
|
# extracted to the appropriate places beforehand these routines
|
||||||
|
# look for them.
|
||||||
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
|
|
||||||
|
plug_ver = '.'.join(str(self.version).strip('()').replace(' ', '').split(','))
|
||||||
k4 = True
|
k4 = True
|
||||||
if sys.platform.startswith('linux'):
|
if sys.platform.startswith('linux'):
|
||||||
k4 = False
|
k4 = False
|
||||||
pids = []
|
pids = []
|
||||||
serials = []
|
serials = []
|
||||||
kInfoFiles = []
|
kInfoFiles = []
|
||||||
|
|
||||||
# Get supplied list of PIDs to try from plugin customization.
|
# Get supplied list of PIDs to try from plugin customization.
|
||||||
customvalues = self.site_customization.split(',')
|
customvalues = self.site_customization.split(',')
|
||||||
for customvalue in customvalues:
|
for customvalue in customvalues:
|
||||||
@@ -51,7 +84,7 @@ class K4DeDRM(FileTypePlugin):
|
|||||||
try:
|
try:
|
||||||
# Find Calibre's configuration directory.
|
# Find Calibre's configuration directory.
|
||||||
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
||||||
print 'K4MobiDeDRM: Calibre configuration directory = %s' % confpath
|
print 'K4MobiDeDRM v%s: Calibre configuration directory = %s' % (plug_ver, confpath)
|
||||||
files = os.listdir(confpath)
|
files = os.listdir(confpath)
|
||||||
filefilter = re.compile("\.info$|\.kinf$", re.IGNORECASE)
|
filefilter = re.compile("\.info$|\.kinf$", re.IGNORECASE)
|
||||||
files = filter(filefilter.search, files)
|
files = filter(filefilter.search, files)
|
||||||
@@ -59,9 +92,9 @@ class K4DeDRM(FileTypePlugin):
|
|||||||
for filename in files:
|
for filename in files:
|
||||||
fpath = os.path.join(confpath, filename)
|
fpath = os.path.join(confpath, filename)
|
||||||
kInfoFiles.append(fpath)
|
kInfoFiles.append(fpath)
|
||||||
print 'K4MobiDeDRM: Kindle info/kinf file %s found in config folder.' % filename
|
print 'K4MobiDeDRM v%s: Kindle info/kinf file %s found in config folder.' % (plug_ver, filename)
|
||||||
except IOError:
|
except IOError:
|
||||||
print 'K4MobiDeDRM: Error reading kindle info/kinf files from config directory.'
|
print 'K4MobiDeDRM v%s: Error reading kindle info/kinf files from config directory.' % plug_ver
|
||||||
pass
|
pass
|
||||||
|
|
||||||
mobi = True
|
mobi = True
|
||||||
@@ -83,27 +116,34 @@ class K4DeDRM(FileTypePlugin):
|
|||||||
try:
|
try:
|
||||||
mb.processBook(pidlst)
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
except mobidedrm.DrmException:
|
except mobidedrm.DrmException, e:
|
||||||
#if you reached here then no luck raise and exception
|
#if you reached here then no luck raise and exception
|
||||||
if is_ok_to_use_qt():
|
if is_ok_to_use_qt():
|
||||||
from PyQt4.Qt import QMessageBox
|
from PyQt4.Qt import QMessageBox
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
|
||||||
d.show()
|
d.show()
|
||||||
d.raise_()
|
d.raise_()
|
||||||
d.exec_()
|
d.exec_()
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
|
||||||
except topazextract.TpzDRMError:
|
except topazextract.TpzDRMError, e:
|
||||||
#if you reached here then no luck raise and exception
|
#if you reached here then no luck raise and exception
|
||||||
if is_ok_to_use_qt():
|
if is_ok_to_use_qt():
|
||||||
from PyQt4.Qt import QMessageBox
|
from PyQt4.Qt import QMessageBox
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
|
||||||
d.show()
|
d.show()
|
||||||
d.raise_()
|
d.raise_()
|
||||||
d.exec_()
|
d.exec_()
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
|
||||||
|
|
||||||
print "Success!"
|
print "Success!"
|
||||||
if mobi:
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
of = self.temporary_file(bookname+'.azw4')
|
||||||
|
print 'K4MobiDeDRM v%s: Print Replica format detected.' % plug_ver
|
||||||
|
elif mb.getMobiVersion() >= 8:
|
||||||
|
print 'K4MobiDeDRM v%s: Stand-alone KF8 format detected.' % plug_ver
|
||||||
|
of = self.temporary_file(bookname+'.azw3')
|
||||||
|
else:
|
||||||
of = self.temporary_file(bookname+'.mobi')
|
of = self.temporary_file(bookname+'.mobi')
|
||||||
mb.getMobiFile(of.name)
|
mb.getMobiFile(of.name)
|
||||||
else:
|
else:
|
||||||
@@ -114,3 +154,11 @@ class K4DeDRM(FileTypePlugin):
|
|||||||
|
|
||||||
def customization_help(self, gui=False):
|
def customization_help(self, gui=False):
|
||||||
return 'Enter 10 character PIDs and/or Kindle serial numbers, use a comma (no spaces) to separate each PID or SerialNumber from the next.'
|
return 'Enter 10 character PIDs and/or Kindle serial numbers, use a comma (no spaces) to separate each PID or SerialNumber from the next.'
|
||||||
|
|
||||||
|
def load_resources(self, names):
|
||||||
|
ans = {}
|
||||||
|
with ZipFile(self.plugin_path, 'r') as zf:
|
||||||
|
for candidate in zf.namelist():
|
||||||
|
if candidate in names:
|
||||||
|
ans[candidate] = zf.read(candidate)
|
||||||
|
return ans
|
||||||
568
Calibre_Plugins/K4MobiDeDRM_plugin/aescbc.py
Normal file
568
Calibre_Plugins/K4MobiDeDRM_plugin/aescbc.py
Normal file
@@ -0,0 +1,568 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for doing AES CBC in one file
|
||||||
|
|
||||||
|
Modified by some_updates to extract
|
||||||
|
and combine only those parts needed for AES CBC
|
||||||
|
into one simple to add python file
|
||||||
|
|
||||||
|
Original Version
|
||||||
|
Copyright (c) 2002 by Paul A. Lambert
|
||||||
|
Under:
|
||||||
|
CryptoPy Artisitic License Version 1.0
|
||||||
|
See the wonderful pure python package cryptopy-1.2.5
|
||||||
|
and read its LICENSE.txt for complete license details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CryptoError(Exception):
|
||||||
|
""" Base class for crypto exceptions """
|
||||||
|
def __init__(self,errorMessage='Error!'):
|
||||||
|
self.message = errorMessage
|
||||||
|
def __str__(self):
|
||||||
|
return self.message
|
||||||
|
|
||||||
|
class InitCryptoError(CryptoError):
|
||||||
|
""" Crypto errors during algorithm initialization """
|
||||||
|
class BadKeySizeError(InitCryptoError):
|
||||||
|
""" Bad key size error """
|
||||||
|
class EncryptError(CryptoError):
|
||||||
|
""" Error in encryption processing """
|
||||||
|
class DecryptError(CryptoError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
class DecryptNotBlockAlignedError(DecryptError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
|
||||||
|
def xorS(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
assert len(a)==len(b)
|
||||||
|
x = []
|
||||||
|
for i in range(len(a)):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
def xor(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
x = []
|
||||||
|
for i in range(min(len(a),len(b))):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Base 'BlockCipher' and Pad classes for cipher instances.
|
||||||
|
BlockCipher supports automatic padding and type conversion. The BlockCipher
|
||||||
|
class was written to make the actual algorithm code more readable and
|
||||||
|
not for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class BlockCipher:
|
||||||
|
""" Block ciphers """
|
||||||
|
def __init__(self):
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.resetEncrypt()
|
||||||
|
self.resetDecrypt()
|
||||||
|
def resetEncrypt(self):
|
||||||
|
self.encryptBlockCount = 0
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
def resetDecrypt(self):
|
||||||
|
self.decryptBlockCount = 0
|
||||||
|
self.bytesToDecrypt = ''
|
||||||
|
|
||||||
|
def encrypt(self, plainText, more = None):
|
||||||
|
""" Encrypt a string and return a binary string """
|
||||||
|
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
|
||||||
|
cipherText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # no more data expected from caller
|
||||||
|
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
|
||||||
|
if len(finalBytes) > 0:
|
||||||
|
ctBlock = self.encryptBlock(finalBytes)
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
self.resetEncrypt()
|
||||||
|
return cipherText
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, more = None):
|
||||||
|
""" Decrypt a string and return a string """
|
||||||
|
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
|
||||||
|
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
|
||||||
|
if more == None: # no more calls to decrypt, should have all the data
|
||||||
|
if numExtraBytes != 0:
|
||||||
|
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
|
||||||
|
|
||||||
|
# hold back some bytes in case last decrypt has zero len
|
||||||
|
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
|
||||||
|
numBlocks -= 1
|
||||||
|
numExtraBytes = self.blockSize
|
||||||
|
|
||||||
|
plainText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
|
||||||
|
self.decryptBlockCount += 1
|
||||||
|
plainText += ptBlock
|
||||||
|
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # last decrypt remove padding
|
||||||
|
plainText = self.padding.removePad(plainText, self.blockSize)
|
||||||
|
self.resetDecrypt()
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
|
||||||
|
class Pad:
|
||||||
|
def __init__(self):
|
||||||
|
pass # eventually could put in calculation of min and max size extension
|
||||||
|
|
||||||
|
class padWithPadLen(Pad):
|
||||||
|
""" Pad a binary string with the length of the padding """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add padding to a binary string to make it an even multiple
|
||||||
|
of the block size """
|
||||||
|
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
|
||||||
|
padLength = blockSize - numExtraBytes
|
||||||
|
return extraBytes + padLength*chr(padLength)
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove padding from a binary string """
|
||||||
|
if not(0<len(paddedBinaryString)):
|
||||||
|
raise DecryptNotBlockAlignedError, 'Expected More Data'
|
||||||
|
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
|
||||||
|
|
||||||
|
class noPadding(Pad):
|
||||||
|
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add no padding """
|
||||||
|
return extraBytes
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove no padding """
|
||||||
|
return paddedBinaryString
|
||||||
|
|
||||||
|
"""
|
||||||
|
Rijndael encryption algorithm
|
||||||
|
This byte oriented implementation is intended to closely
|
||||||
|
match FIPS specification for readability. It is not implemented
|
||||||
|
for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Rijndael(BlockCipher):
|
||||||
|
""" Rijndael encryption algorithm """
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
|
||||||
|
self.name = 'RIJNDAEL'
|
||||||
|
self.keySize = keySize
|
||||||
|
self.strength = keySize*8
|
||||||
|
self.blockSize = blockSize # blockSize is in bytes
|
||||||
|
self.padding = padding # change default to noPadding() to get normal ECB behavior
|
||||||
|
|
||||||
|
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes'
|
||||||
|
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes'
|
||||||
|
|
||||||
|
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
|
||||||
|
self.Nk = keySize/4 # Nk is the key length in 32-bit words
|
||||||
|
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
|
||||||
|
# the block (Nb) and key (Nk) sizes.
|
||||||
|
if key != None:
|
||||||
|
self.setKey(key)
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
""" Set a key and generate the expanded key """
|
||||||
|
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
|
||||||
|
self.__expandedKey = keyExpansion(self, key)
|
||||||
|
self.reset() # BlockCipher.reset()
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
|
||||||
|
self.state = self._toBlock(plainTextBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
MixColumns(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" decrypt a block (array of bytes) """
|
||||||
|
self.state = self._toBlock(encryptedBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
for round in range(self.Nr-1,0,-1):
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
InvMixColumns(self)
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
def _toBlock(self, bs):
|
||||||
|
""" Convert binary string to array of bytes, state[col][row]"""
|
||||||
|
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
|
||||||
|
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
|
||||||
|
|
||||||
|
def _toBString(self, block):
|
||||||
|
""" Convert block (array of bytes) to binary string """
|
||||||
|
l = []
|
||||||
|
for col in block:
|
||||||
|
for rowElement in col:
|
||||||
|
l.append(chr(rowElement))
|
||||||
|
return ''.join(l)
|
||||||
|
#-------------------------------------
|
||||||
|
""" Number of rounds Nr = NrTable[Nb][Nk]
|
||||||
|
|
||||||
|
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
|
||||||
|
------------------------------------- """
|
||||||
|
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
5: {4:11, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
6: {4:12, 5:12, 6:12, 7:13, 8:14},
|
||||||
|
7: {4:13, 5:13, 6:13, 7:13, 8:14},
|
||||||
|
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
|
||||||
|
#-------------------------------------
|
||||||
|
def keyExpansion(algInstance, keyString):
|
||||||
|
""" Expand a string of size keySize into a larger array """
|
||||||
|
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
|
||||||
|
key = [ord(byte) for byte in keyString] # convert string to list
|
||||||
|
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
|
||||||
|
for i in range(Nk,Nb*(Nr+1)):
|
||||||
|
temp = w[i-1] # a four byte column
|
||||||
|
if (i%Nk) == 0 :
|
||||||
|
temp = temp[1:]+[temp[0]] # RotWord(temp)
|
||||||
|
temp = [ Sbox[byte] for byte in temp ]
|
||||||
|
temp[0] ^= Rcon[i/Nk]
|
||||||
|
elif Nk > 6 and i%Nk == 4 :
|
||||||
|
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
|
||||||
|
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
|
||||||
|
return w
|
||||||
|
|
||||||
|
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
|
||||||
|
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
|
||||||
|
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def AddRoundKey(algInstance, keyBlock):
|
||||||
|
""" XOR the algorithm state with a block of key material """
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] ^= keyBlock[column][row]
|
||||||
|
#-------------------------------------
|
||||||
|
|
||||||
|
def SubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
def InvSubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
|
||||||
|
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
|
||||||
|
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
|
||||||
|
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
|
||||||
|
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
|
||||||
|
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
|
||||||
|
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
|
||||||
|
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
|
||||||
|
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
|
||||||
|
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
|
||||||
|
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
|
||||||
|
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
|
||||||
|
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
|
||||||
|
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
|
||||||
|
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
|
||||||
|
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
|
||||||
|
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
|
||||||
|
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
|
||||||
|
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
|
||||||
|
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
|
||||||
|
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
|
||||||
|
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
|
||||||
|
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
|
||||||
|
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
|
||||||
|
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
|
||||||
|
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
|
||||||
|
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
|
||||||
|
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
|
||||||
|
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
|
||||||
|
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
|
||||||
|
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
|
||||||
|
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
|
||||||
|
|
||||||
|
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
|
||||||
|
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
|
||||||
|
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
|
||||||
|
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
|
||||||
|
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
|
||||||
|
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
|
||||||
|
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
|
||||||
|
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
|
||||||
|
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
|
||||||
|
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
|
||||||
|
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
|
||||||
|
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
|
||||||
|
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
|
||||||
|
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
|
||||||
|
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
|
||||||
|
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
|
||||||
|
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
|
||||||
|
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
|
||||||
|
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
|
||||||
|
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
|
||||||
|
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
|
||||||
|
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
|
||||||
|
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
|
||||||
|
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
|
||||||
|
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
|
||||||
|
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
|
||||||
|
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
|
||||||
|
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
|
||||||
|
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
|
||||||
|
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
|
||||||
|
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
|
||||||
|
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
""" For each block size (Nb), the ShiftRow operation shifts row i
|
||||||
|
by the amount Ci. Note that row 0 is not shifted.
|
||||||
|
Nb C1 C2 C3
|
||||||
|
------------------- """
|
||||||
|
shiftOffset = { 4 : ( 0, 1, 2, 3),
|
||||||
|
5 : ( 0, 1, 2, 3),
|
||||||
|
6 : ( 0, 1, 2, 3),
|
||||||
|
7 : ( 0, 1, 2, 4),
|
||||||
|
8 : ( 0, 1, 3, 4) }
|
||||||
|
def ShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
def InvShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
#-------------------------------------
|
||||||
|
def MixColumns(a):
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
|
||||||
|
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
def InvMixColumns(a):
|
||||||
|
""" Mix the four bytes of every column in a linear way
|
||||||
|
This is the opposite operation of Mixcolumn """
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
|
||||||
|
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
|
||||||
|
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
|
||||||
|
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def mul(a, b):
|
||||||
|
""" Multiply two elements of GF(2^m)
|
||||||
|
needed for MixColumn and InvMixColumn """
|
||||||
|
if (a !=0 and b!=0):
|
||||||
|
return Alogtable[(Logtable[a] + Logtable[b])%255]
|
||||||
|
else:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
|
||||||
|
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
|
||||||
|
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
|
||||||
|
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
|
||||||
|
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
|
||||||
|
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
|
||||||
|
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
|
||||||
|
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
|
||||||
|
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
|
||||||
|
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
|
||||||
|
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
|
||||||
|
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
|
||||||
|
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
|
||||||
|
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
|
||||||
|
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
|
||||||
|
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
|
||||||
|
|
||||||
|
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
|
||||||
|
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
|
||||||
|
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
|
||||||
|
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
|
||||||
|
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
|
||||||
|
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
|
||||||
|
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
|
||||||
|
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
|
||||||
|
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
|
||||||
|
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
|
||||||
|
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
|
||||||
|
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
|
||||||
|
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
|
||||||
|
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
|
||||||
|
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
|
||||||
|
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES Encryption Algorithm
|
||||||
|
The AES algorithm is just Rijndael algorithm restricted to the default
|
||||||
|
blockSize of 128 bits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES(Rijndael):
|
||||||
|
""" The AES algorithm is the Rijndael block cipher restricted to block
|
||||||
|
sizes of 128 bits and key sizes of 128, 192 or 256 bits
|
||||||
|
"""
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
|
||||||
|
""" Initialize AES, keySize is in bytes """
|
||||||
|
if not (keySize == 16 or keySize == 24 or keySize == 32) :
|
||||||
|
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
|
||||||
|
|
||||||
|
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
|
||||||
|
|
||||||
|
self.name = 'AES'
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CBC mode of encryption for block ciphers.
|
||||||
|
This algorithm mode wraps any BlockCipher to make a
|
||||||
|
Cipher Block Chaining mode.
|
||||||
|
"""
|
||||||
|
from random import Random # should change to crypto.random!!!
|
||||||
|
|
||||||
|
|
||||||
|
class CBC(BlockCipher):
|
||||||
|
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
|
||||||
|
algorithms. The initialization (IV) is automatic if set to None. Padding
|
||||||
|
is also automatic based on the Pad class used to initialize the algorithm
|
||||||
|
"""
|
||||||
|
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
|
||||||
|
""" CBC algorithms are created by initializing with a BlockCipher instance """
|
||||||
|
self.baseCipher = blockCipherInstance
|
||||||
|
self.name = self.baseCipher.name + '_CBC'
|
||||||
|
self.blockSize = self.baseCipher.blockSize
|
||||||
|
self.keySize = self.baseCipher.keySize
|
||||||
|
self.padding = padding
|
||||||
|
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
|
||||||
|
self.r = Random() # for IV generation, currently uses
|
||||||
|
# mediocre standard distro version <----------------
|
||||||
|
import time
|
||||||
|
newSeed = time.ctime()+str(self.r) # seed with instance location
|
||||||
|
self.r.seed(newSeed) # to make unique
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
self.baseCipher.setKey(key)
|
||||||
|
|
||||||
|
# Overload to reset both CBC state and the wrapped baseCipher
|
||||||
|
def resetEncrypt(self):
|
||||||
|
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
|
||||||
|
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
|
||||||
|
|
||||||
|
def resetDecrypt(self):
|
||||||
|
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
|
||||||
|
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
|
||||||
|
|
||||||
|
def encrypt(self, plainText, iv=None, more=None):
|
||||||
|
""" CBC encryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to encrypt'
|
||||||
|
|
||||||
|
return BlockCipher.encrypt(self,plainText, more=more)
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, iv=None, more=None):
|
||||||
|
""" CBC decryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.decryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to decrypt'
|
||||||
|
|
||||||
|
return BlockCipher.decrypt(self, cipherText, more=more)
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" CBC block encryption, IV is set with 'encrypt' """
|
||||||
|
auto_IV = ''
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
if self.iv == None:
|
||||||
|
# generate IV and use
|
||||||
|
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
|
||||||
|
else: # application provided IV
|
||||||
|
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
""" encrypt the prior CT XORed with the PT """
|
||||||
|
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
|
||||||
|
self.prior_encr_CT_block = ct
|
||||||
|
return auto_IV+ct
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" Decrypt a single block """
|
||||||
|
|
||||||
|
if self.decryptBlockCount == 0: # first call, process IV
|
||||||
|
if self.iv == None: # auto decrypt IV?
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
return ''
|
||||||
|
else:
|
||||||
|
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
|
||||||
|
self.prior_CT_block = self.iv
|
||||||
|
|
||||||
|
dct = self.baseCipher.decryptBlock(encryptedBlock)
|
||||||
|
""" XOR the prior decrypted CT with the prior CT """
|
||||||
|
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
|
||||||
|
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
|
||||||
|
return dct_XOR_priorCT
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES_CBC Encryption Algorithm
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES_CBC(CBC):
|
||||||
|
""" AES encryption in CBC feedback mode """
|
||||||
|
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
|
||||||
|
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
|
||||||
|
self.name = 'AES_CBC'
|
||||||
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto.dll
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto.dll
Normal file
Binary file not shown.
290
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto.py
Normal file
290
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto.py
Normal file
@@ -0,0 +1,290 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines libalfcrypto
|
||||||
|
def _load_libalfcrypto():
|
||||||
|
import ctypes
|
||||||
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
|
||||||
|
|
||||||
|
pointer_size = ctypes.sizeof(ctypes.c_voidp)
|
||||||
|
name_of_lib = None
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
name_of_lib = 'libalfcrypto.dylib'
|
||||||
|
elif sys.platform.startswith('win'):
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'alfcrypto.dll'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'alfcrypto64.dll'
|
||||||
|
else:
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'libalfcrypto32.so'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'libalfcrypto64.so'
|
||||||
|
|
||||||
|
libalfcrypto = sys.path[0] + os.sep + name_of_lib
|
||||||
|
|
||||||
|
if not os.path.isfile(libalfcrypto):
|
||||||
|
raise Exception('libalfcrypto not found')
|
||||||
|
|
||||||
|
libalfcrypto = CDLL(libalfcrypto)
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libalfcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
# aes cbc decryption
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
#
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key,
|
||||||
|
# unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pukall 1 Cipher
|
||||||
|
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
|
||||||
|
# unsigned char *dest, unsigned int len, int decryption);
|
||||||
|
|
||||||
|
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
|
||||||
|
|
||||||
|
# Topaz Encryption
|
||||||
|
# typedef struct _TpzCtx {
|
||||||
|
# unsigned int v[2];
|
||||||
|
# } TpzCtx;
|
||||||
|
#
|
||||||
|
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
|
||||||
|
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
|
||||||
|
|
||||||
|
class TPZ_CTX(Structure):
|
||||||
|
_fields_ = [('v', c_long * 2)]
|
||||||
|
|
||||||
|
TPZ_CTX_p = POINTER(TPZ_CTX)
|
||||||
|
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
|
||||||
|
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
|
||||||
|
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise Exception('AES CBC improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise Exception('Failed to initialize AES CBC key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise Exception('AES CBC decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
self.key = key
|
||||||
|
out = create_string_buffer(len(src))
|
||||||
|
de = 0
|
||||||
|
if decryption:
|
||||||
|
de = 1
|
||||||
|
rv = PC1(key, len(key), src, out, len(src), de)
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
tpz_ctx = self._ctx = TPZ_CTX()
|
||||||
|
topazCryptoInit(tpz_ctx, key, len(key))
|
||||||
|
return tpz_ctx
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
topazCryptoDecrypt(ctx, data, out, len(data))
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
print "Using Library AlfCrypto DLL/DYLIB/SO"
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
|
||||||
|
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
sum1 = 0;
|
||||||
|
sum2 = 0;
|
||||||
|
keyXorVal = 0;
|
||||||
|
if len(key)!=16:
|
||||||
|
print "Bad key length!"
|
||||||
|
return None
|
||||||
|
wkey = []
|
||||||
|
for i in xrange(8):
|
||||||
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
dst = ""
|
||||||
|
for i in xrange(len(src)):
|
||||||
|
temp1 = 0;
|
||||||
|
byteXorVal = 0;
|
||||||
|
for j in xrange(8):
|
||||||
|
temp1 ^= wkey[j]
|
||||||
|
sum2 = (sum2+j)*20021 + sum1
|
||||||
|
sum1 = (temp1*346)&0xFFFF
|
||||||
|
sum2 = (sum2+sum1)&0xFFFF
|
||||||
|
temp1 = (temp1*20021+1)&0xFFFF
|
||||||
|
byteXorVal ^= temp1 ^ sum2
|
||||||
|
curByte = ord(src[i])
|
||||||
|
if not decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
|
if decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
for j in xrange(8):
|
||||||
|
wkey[j] ^= keyXorVal;
|
||||||
|
dst+=chr(curByte)
|
||||||
|
return dst
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
self._ctx = [ctx1, ctx2]
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._key = None
|
||||||
|
self._iv = None
|
||||||
|
self.aes = None
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._key = userkey
|
||||||
|
self._iv = iv
|
||||||
|
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
iv = self._iv
|
||||||
|
cleartext = self.aes.decrypt(iv + data)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
|
||||||
|
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
|
try:
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
|
||||||
|
break
|
||||||
|
except (ImportError, Exception):
|
||||||
|
pass
|
||||||
|
return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
|
||||||
|
# this only exists in openssl so we will use pure python implementation instead
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
def pbkdf2(self, passwd, salt, iter, keylen):
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise Exception("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
return T
|
||||||
|
|
||||||
|
sha = hashlib.sha1
|
||||||
|
digest_size = sha().digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
h = hmac.new( passwd, None, sha )
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, iter, i )
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
|
||||||
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto64.dll
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto64.dll
Normal file
Binary file not shown.
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto_src.zip
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/alfcrypto_src.zip
Normal file
Binary file not shown.
899
Calibre_Plugins/K4MobiDeDRM_plugin/cmbtc_v2.2.py
Normal file
899
Calibre_Plugins/K4MobiDeDRM_plugin/cmbtc_v2.2.py
Normal file
@@ -0,0 +1,899 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
Comprehensive Mazama Book DRM with Topaz Cryptography V2.2
|
||||||
|
|
||||||
|
-----BEGIN PUBLIC KEY-----
|
||||||
|
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdBHJ4CNc6DNFCw4MRCw4SWAK6
|
||||||
|
M8hYfnNEI0yQmn5Ti+W8biT7EatpauE/5jgQMPBmdNrDr1hbHyHBSP7xeC2qlRWC
|
||||||
|
B62UCxeu/fpfnvNHDN/wPWWH4jynZ2M6cdcnE5LQ+FfeKqZn7gnG2No1U9h7oOHx
|
||||||
|
y2/pHuYme7U1TsgSjwIDAQAB
|
||||||
|
-----END PUBLIC KEY-----
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import getopt
|
||||||
|
import zlib
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
|
string_at, Structure, c_void_p, cast
|
||||||
|
import _winreg as winreg
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkMessageBox
|
||||||
|
import traceback
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
MAX_PATH = 255
|
||||||
|
|
||||||
|
kernel32 = windll.kernel32
|
||||||
|
advapi32 = windll.advapi32
|
||||||
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
global kindleDatabase
|
||||||
|
global bookFile
|
||||||
|
global bookPayloadOffset
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookMetadata
|
||||||
|
global bookKey
|
||||||
|
global command
|
||||||
|
|
||||||
|
#
|
||||||
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
|
#
|
||||||
|
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
|
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
||||||
|
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
|
|
||||||
|
#
|
||||||
|
# Exceptions for all the problems that might happen during the script
|
||||||
|
#
|
||||||
|
|
||||||
|
class CMBDTCError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class CMBDTCFatal(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
#
|
||||||
|
# Stolen stuff
|
||||||
|
#
|
||||||
|
|
||||||
|
class DataBlob(Structure):
|
||||||
|
_fields_ = [('cbData', c_uint),
|
||||||
|
('pbData', c_void_p)]
|
||||||
|
DataBlob_p = POINTER(DataBlob)
|
||||||
|
|
||||||
|
def GetSystemDirectory():
|
||||||
|
GetSystemDirectoryW = kernel32.GetSystemDirectoryW
|
||||||
|
GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
|
||||||
|
GetSystemDirectoryW.restype = c_uint
|
||||||
|
def GetSystemDirectory():
|
||||||
|
buffer = create_unicode_buffer(MAX_PATH + 1)
|
||||||
|
GetSystemDirectoryW(buffer, len(buffer))
|
||||||
|
return buffer.value
|
||||||
|
return GetSystemDirectory
|
||||||
|
GetSystemDirectory = GetSystemDirectory()
|
||||||
|
|
||||||
|
|
||||||
|
def GetVolumeSerialNumber():
|
||||||
|
GetVolumeInformationW = kernel32.GetVolumeInformationW
|
||||||
|
GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
|
||||||
|
POINTER(c_uint), POINTER(c_uint),
|
||||||
|
POINTER(c_uint), c_wchar_p, c_uint]
|
||||||
|
GetVolumeInformationW.restype = c_uint
|
||||||
|
def GetVolumeSerialNumber(path):
|
||||||
|
vsn = c_uint(0)
|
||||||
|
GetVolumeInformationW(path, None, 0, byref(vsn), None, None, None, 0)
|
||||||
|
return vsn.value
|
||||||
|
return GetVolumeSerialNumber
|
||||||
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
|
||||||
|
def GetUserName():
|
||||||
|
GetUserNameW = advapi32.GetUserNameW
|
||||||
|
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
||||||
|
GetUserNameW.restype = c_uint
|
||||||
|
def GetUserName():
|
||||||
|
buffer = create_unicode_buffer(32)
|
||||||
|
size = c_uint(len(buffer))
|
||||||
|
while not GetUserNameW(buffer, byref(size)):
|
||||||
|
buffer = create_unicode_buffer(len(buffer) * 2)
|
||||||
|
size.value = len(buffer)
|
||||||
|
return buffer.value.encode('utf-16-le')[::2]
|
||||||
|
return GetUserName
|
||||||
|
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
|
||||||
|
def CryptUnprotectData():
|
||||||
|
_CryptUnprotectData = crypt32.CryptUnprotectData
|
||||||
|
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
||||||
|
c_void_p, c_void_p, c_uint, DataBlob_p]
|
||||||
|
_CryptUnprotectData.restype = c_uint
|
||||||
|
def CryptUnprotectData(indata, entropy):
|
||||||
|
indatab = create_string_buffer(indata)
|
||||||
|
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
||||||
|
entropyb = create_string_buffer(entropy)
|
||||||
|
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
||||||
|
outdata = DataBlob()
|
||||||
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
|
None, None, 0, byref(outdata)):
|
||||||
|
raise CMBDTCFatal("Failed to Unprotect Data")
|
||||||
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
|
return CryptUnprotectData
|
||||||
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the MD5 digest of "message"
|
||||||
|
#
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the MD5 digest of "message"
|
||||||
|
#
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Open the book file at path
|
||||||
|
#
|
||||||
|
|
||||||
|
def openBook(path):
|
||||||
|
try:
|
||||||
|
return open(path,'rb')
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Could not open book file: " + path)
|
||||||
|
#
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
#
|
||||||
|
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
#
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
#
|
||||||
|
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data),2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
value = (((high * 0x40) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
#
|
||||||
|
# Locate and open the Kindle.info file (Hopefully in the way it is done in the Kindle application)
|
||||||
|
#
|
||||||
|
|
||||||
|
def openKindleInfo():
|
||||||
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
|
return open(path+'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info','r')
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the Kindle.info file and return the records as a list of key-values
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseKindleInfo():
|
||||||
|
DB = {}
|
||||||
|
infoReader = openKindleInfo()
|
||||||
|
infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
items = data.split('{')
|
||||||
|
|
||||||
|
for item in items:
|
||||||
|
splito = item.split(':')
|
||||||
|
DB[splito[0]] =splito[1]
|
||||||
|
return DB
|
||||||
|
|
||||||
|
#
|
||||||
|
# Find if the original string for a hashed/encoded string is known. If so return the original string othwise return an empty string. (Totally not optimal)
|
||||||
|
#
|
||||||
|
|
||||||
|
def findNameForHash(hash):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
||||||
|
result = ""
|
||||||
|
for name in names:
|
||||||
|
if hash == encodeHash(name, charMap2):
|
||||||
|
result = name
|
||||||
|
break
|
||||||
|
return name
|
||||||
|
|
||||||
|
#
|
||||||
|
# Print all the records from the kindle.info file (option -i)
|
||||||
|
#
|
||||||
|
|
||||||
|
def printKindleInfo():
|
||||||
|
for record in kindleDatabase:
|
||||||
|
name = findNameForHash(record)
|
||||||
|
if name != "" :
|
||||||
|
print (name)
|
||||||
|
print ("--------------------------\n")
|
||||||
|
else :
|
||||||
|
print ("Unknown Record")
|
||||||
|
print getKindleInfoValueForHash(record)
|
||||||
|
print "\n"
|
||||||
|
#
|
||||||
|
# Get a record from the Kindle.info file for the key "hashedKey" (already hashed and encoded). Return the decoded and decrypted record
|
||||||
|
#
|
||||||
|
|
||||||
|
def getKindleInfoValueForHash(hashedKey):
|
||||||
|
global kindleDatabase
|
||||||
|
encryptedValue = decode(kindleDatabase[hashedKey],charMap2)
|
||||||
|
return CryptUnprotectData(encryptedValue,"")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a record from the Kindle.info file for the string in "key" (plaintext). Return the decoded and decrypted record
|
||||||
|
#
|
||||||
|
|
||||||
|
def getKindleInfoValueForKey(key):
|
||||||
|
return getKindleInfoValueForHash(encodeHash(key,charMap2))
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a 7 bit encoded number from the book file
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadEncodedNumber():
|
||||||
|
flag = False
|
||||||
|
data = ord(bookFile.read(1))
|
||||||
|
|
||||||
|
if data == 0xFF:
|
||||||
|
flag = True
|
||||||
|
data = ord(bookFile.read(1))
|
||||||
|
|
||||||
|
if data >= 0x80:
|
||||||
|
datax = (data & 0x7F)
|
||||||
|
while data >= 0x80 :
|
||||||
|
data = ord(bookFile.read(1))
|
||||||
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
|
data = datax
|
||||||
|
|
||||||
|
if flag:
|
||||||
|
data = -data
|
||||||
|
return data
|
||||||
|
|
||||||
|
#
|
||||||
|
# Encode a number in 7 bit format
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodeNumber(number):
|
||||||
|
result = ""
|
||||||
|
negative = False
|
||||||
|
flag = 0
|
||||||
|
|
||||||
|
if number < 0 :
|
||||||
|
number = -number + 1
|
||||||
|
negative = True
|
||||||
|
|
||||||
|
while True:
|
||||||
|
byte = number & 0x7F
|
||||||
|
number = number >> 7
|
||||||
|
byte += flag
|
||||||
|
result += chr(byte)
|
||||||
|
flag = 0x80
|
||||||
|
if number == 0 :
|
||||||
|
if (byte == 0xFF and negative == False) :
|
||||||
|
result += chr(0x80)
|
||||||
|
break
|
||||||
|
|
||||||
|
if negative:
|
||||||
|
result += chr(0xFF)
|
||||||
|
|
||||||
|
return result[::-1]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a length prefixed string from the file
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadString():
|
||||||
|
stringLength = bookReadEncodedNumber()
|
||||||
|
return unpack(str(stringLength)+"s",bookFile.read(stringLength))[0]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns a length prefixed string
|
||||||
|
#
|
||||||
|
|
||||||
|
def lengthPrefixString(data):
|
||||||
|
return encodeNumber(len(data))+data
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read and return the data of one header record at the current book file position [[offset,compressedLength,decompressedLength],...]
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadHeaderRecordData():
|
||||||
|
nbValues = bookReadEncodedNumber()
|
||||||
|
values = []
|
||||||
|
for i in range (0,nbValues):
|
||||||
|
values.append([bookReadEncodedNumber(),bookReadEncodedNumber(),bookReadEncodedNumber()])
|
||||||
|
return values
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read and parse one header record at the current book file position and return the associated data [[offset,compressedLength,decompressedLength],...]
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseTopazHeaderRecord():
|
||||||
|
if ord(bookFile.read(1)) != 0x63:
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header")
|
||||||
|
|
||||||
|
tag = bookReadString()
|
||||||
|
record = bookReadHeaderRecordData()
|
||||||
|
return [tag,record]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the header of a Topaz file, get all the header records and the offset for the payload
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseTopazHeader():
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookPayloadOffset
|
||||||
|
magic = unpack("4s",bookFile.read(4))[0]
|
||||||
|
|
||||||
|
if magic != 'TPZ0':
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header, not a Topaz file")
|
||||||
|
|
||||||
|
nbRecords = bookReadEncodedNumber()
|
||||||
|
bookHeaderRecords = {}
|
||||||
|
|
||||||
|
for i in range (0,nbRecords):
|
||||||
|
result = parseTopazHeaderRecord()
|
||||||
|
bookHeaderRecords[result[0]] = result[1]
|
||||||
|
|
||||||
|
if ord(bookFile.read(1)) != 0x64 :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header")
|
||||||
|
|
||||||
|
bookPayloadOffset = bookFile.tell()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a record in the book payload, given its name and index. If necessary the record is decrypted. The record is not decompressed
|
||||||
|
#
|
||||||
|
|
||||||
|
def getBookPayloadRecord(name, index):
|
||||||
|
encrypted = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
recordOffset = bookHeaderRecords[name][index][0]
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, record not found")
|
||||||
|
|
||||||
|
bookFile.seek(bookPayloadOffset + recordOffset)
|
||||||
|
|
||||||
|
tag = bookReadString()
|
||||||
|
if tag != name :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, record name doesn't match")
|
||||||
|
|
||||||
|
recordIndex = bookReadEncodedNumber()
|
||||||
|
|
||||||
|
if recordIndex < 0 :
|
||||||
|
encrypted = True
|
||||||
|
recordIndex = -recordIndex -1
|
||||||
|
|
||||||
|
if recordIndex != index :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, index doesn't match")
|
||||||
|
|
||||||
|
if bookHeaderRecords[name][index][2] != 0 :
|
||||||
|
record = bookFile.read(bookHeaderRecords[name][index][2])
|
||||||
|
else:
|
||||||
|
record = bookFile.read(bookHeaderRecords[name][index][1])
|
||||||
|
|
||||||
|
if encrypted:
|
||||||
|
ctx = topazCryptoInit(bookKey)
|
||||||
|
record = topazCryptoDecrypt(record,ctx)
|
||||||
|
|
||||||
|
return record
|
||||||
|
|
||||||
|
#
|
||||||
|
# Extract, decrypt and decompress a book record indicated by name and index and print it or save it in "filename"
|
||||||
|
#
|
||||||
|
|
||||||
|
def extractBookPayloadRecord(name, index, filename):
|
||||||
|
compressed = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
compressed = bookHeaderRecords[name][index][2] != 0
|
||||||
|
record = getBookPayloadRecord(name,index)
|
||||||
|
except:
|
||||||
|
print("Could not find record")
|
||||||
|
|
||||||
|
if compressed:
|
||||||
|
try:
|
||||||
|
record = zlib.decompress(record)
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Could not decompress record")
|
||||||
|
|
||||||
|
if filename != "":
|
||||||
|
try:
|
||||||
|
file = open(filename,"wb")
|
||||||
|
file.write(record)
|
||||||
|
file.close()
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Could not write to destination file")
|
||||||
|
else:
|
||||||
|
print(record)
|
||||||
|
|
||||||
|
#
|
||||||
|
# return next record [key,value] from the book metadata from the current book position
|
||||||
|
#
|
||||||
|
|
||||||
|
def readMetadataRecord():
|
||||||
|
return [bookReadString(),bookReadString()]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the metadata record from the book payload and return a list of [key,values]
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseMetadata():
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookPayloadAddress
|
||||||
|
global bookMetadata
|
||||||
|
bookMetadata = {}
|
||||||
|
bookFile.seek(bookPayloadOffset + bookHeaderRecords["metadata"][0][0])
|
||||||
|
tag = bookReadString()
|
||||||
|
if tag != "metadata" :
|
||||||
|
raise CMBDTCFatal("Parse Error : Record Names Don't Match")
|
||||||
|
|
||||||
|
flags = ord(bookFile.read(1))
|
||||||
|
nbRecords = ord(bookFile.read(1))
|
||||||
|
|
||||||
|
for i in range (0,nbRecords) :
|
||||||
|
record =readMetadataRecord()
|
||||||
|
bookMetadata[record[0]] = record[1]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns two bit at offset from a bit field
|
||||||
|
#
|
||||||
|
|
||||||
|
def getTwoBitsFromBitField(bitField,offset):
|
||||||
|
byteNumber = offset // 4
|
||||||
|
bitPosition = 6 - 2*(offset % 4)
|
||||||
|
|
||||||
|
return ord(bitField[byteNumber]) >> bitPosition & 3
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the six bits at offset from a bit field
|
||||||
|
#
|
||||||
|
|
||||||
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
|
offset *= 3
|
||||||
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
|
return value
|
||||||
|
|
||||||
|
#
|
||||||
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodePID(hash):
|
||||||
|
global charMap3
|
||||||
|
PID = ""
|
||||||
|
for position in range (0,8):
|
||||||
|
PID += charMap3[getSixBitsFromBitField(hash,position)]
|
||||||
|
return PID
|
||||||
|
|
||||||
|
#
|
||||||
|
# Context initialisation for the Topaz Crypto
|
||||||
|
#
|
||||||
|
|
||||||
|
def topazCryptoInit(key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
#
|
||||||
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
|
#
|
||||||
|
|
||||||
|
def topazCryptoDecrypt(data, ctx):
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
|
||||||
|
plainText = ""
|
||||||
|
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt a payload record with the PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptRecord(data,PID):
|
||||||
|
ctx = topazCryptoInit(PID)
|
||||||
|
return topazCryptoDecrypt(data, ctx)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Try to decrypt a dkey record (contains the book PID)
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptDkeyRecord(data,PID):
|
||||||
|
record = decryptRecord(data,PID)
|
||||||
|
fields = unpack("3sB8sB8s3s",record)
|
||||||
|
|
||||||
|
if fields[0] != "PID" or fields[5] != "pid" :
|
||||||
|
raise CMBDTCError("Didn't find PID magic numbers in record")
|
||||||
|
elif fields[1] != 8 or fields[3] != 8 :
|
||||||
|
raise CMBDTCError("Record didn't contain correct length fields")
|
||||||
|
elif fields[2] != PID :
|
||||||
|
raise CMBDTCError("Record didn't contain PID")
|
||||||
|
|
||||||
|
return fields[4]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt all the book's dkey records (contain the book PID)
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptDkeyRecords(data,PID):
|
||||||
|
nbKeyRecords = ord(data[0])
|
||||||
|
records = []
|
||||||
|
data = data[1:]
|
||||||
|
for i in range (0,nbKeyRecords):
|
||||||
|
length = ord(data[0])
|
||||||
|
try:
|
||||||
|
key = decryptDkeyRecord(data[1:length+1],PID)
|
||||||
|
records.append(key)
|
||||||
|
except CMBDTCError:
|
||||||
|
pass
|
||||||
|
data = data[1+length:]
|
||||||
|
|
||||||
|
return records
|
||||||
|
|
||||||
|
#
|
||||||
|
# Encryption table used to generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generatePidEncryptionTable() :
|
||||||
|
table = []
|
||||||
|
for counter1 in range (0,0x100):
|
||||||
|
value = counter1
|
||||||
|
for counter2 in range (0,8):
|
||||||
|
if (value & 1 == 0) :
|
||||||
|
value = value >> 1
|
||||||
|
else :
|
||||||
|
value = value >> 1
|
||||||
|
value = value ^ 0xEDB88320
|
||||||
|
table.append(value)
|
||||||
|
return table
|
||||||
|
|
||||||
|
#
|
||||||
|
# Seed value used to generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generatePidSeed(table,dsn) :
|
||||||
|
value = 0
|
||||||
|
for counter in range (0,4) :
|
||||||
|
index = (ord(dsn[counter]) ^ value) &0xFF
|
||||||
|
value = (value >> 8) ^ table[index]
|
||||||
|
return value
|
||||||
|
|
||||||
|
#
|
||||||
|
# Generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generateDevicePID(table,dsn,nbRoll):
|
||||||
|
seed = generatePidSeed(table,dsn)
|
||||||
|
pidAscii = ""
|
||||||
|
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
|
||||||
|
index = 0
|
||||||
|
|
||||||
|
for counter in range (0,nbRoll):
|
||||||
|
pid[index] = pid[index] ^ ord(dsn[counter])
|
||||||
|
index = (index+1) %8
|
||||||
|
|
||||||
|
for counter in range (0,8):
|
||||||
|
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
|
||||||
|
pidAscii += charMap4[index]
|
||||||
|
return pidAscii
|
||||||
|
|
||||||
|
#
|
||||||
|
# Create decrypted book payload
|
||||||
|
#
|
||||||
|
|
||||||
|
def createDecryptedPayload(payload):
|
||||||
|
|
||||||
|
# store data to be able to create the header later
|
||||||
|
headerData= []
|
||||||
|
currentOffset = 0
|
||||||
|
|
||||||
|
# Add social DRM to decrypted files
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = getKindleInfoValueForKey("kindle.name.info")+":"+ getKindleInfoValueForKey("login")
|
||||||
|
if payload!= None:
|
||||||
|
payload.write(lengthPrefixString("sdrm"))
|
||||||
|
payload.write(encodeNumber(0))
|
||||||
|
payload.write(data)
|
||||||
|
else:
|
||||||
|
currentOffset += len(lengthPrefixString("sdrm"))
|
||||||
|
currentOffset += len(encodeNumber(0))
|
||||||
|
currentOffset += len(data)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
for headerRecord in bookHeaderRecords:
|
||||||
|
name = headerRecord
|
||||||
|
newRecord = []
|
||||||
|
|
||||||
|
if name != "dkey" :
|
||||||
|
|
||||||
|
for index in range (0,len(bookHeaderRecords[name])) :
|
||||||
|
offset = currentOffset
|
||||||
|
|
||||||
|
if payload != None:
|
||||||
|
# write tag
|
||||||
|
payload.write(lengthPrefixString(name))
|
||||||
|
# write data
|
||||||
|
payload.write(encodeNumber(index))
|
||||||
|
payload.write(getBookPayloadRecord(name, index))
|
||||||
|
|
||||||
|
else :
|
||||||
|
currentOffset += len(lengthPrefixString(name))
|
||||||
|
currentOffset += len(encodeNumber(index))
|
||||||
|
currentOffset += len(getBookPayloadRecord(name, index))
|
||||||
|
newRecord.append([offset,bookHeaderRecords[name][index][1],bookHeaderRecords[name][index][2]])
|
||||||
|
|
||||||
|
headerData.append([name,newRecord])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
return headerData
|
||||||
|
|
||||||
|
#
|
||||||
|
# Create decrypted book
|
||||||
|
#
|
||||||
|
|
||||||
|
def createDecryptedBook(outputFile):
|
||||||
|
outputFile = open(outputFile,"wb")
|
||||||
|
# Write the payload in a temporary file
|
||||||
|
headerData = createDecryptedPayload(None)
|
||||||
|
outputFile.write("TPZ0")
|
||||||
|
outputFile.write(encodeNumber(len(headerData)))
|
||||||
|
|
||||||
|
for header in headerData :
|
||||||
|
outputFile.write(chr(0x63))
|
||||||
|
outputFile.write(lengthPrefixString(header[0]))
|
||||||
|
outputFile.write(encodeNumber(len(header[1])))
|
||||||
|
for numbers in header[1] :
|
||||||
|
outputFile.write(encodeNumber(numbers[0]))
|
||||||
|
outputFile.write(encodeNumber(numbers[1]))
|
||||||
|
outputFile.write(encodeNumber(numbers[2]))
|
||||||
|
|
||||||
|
outputFile.write(chr(0x64))
|
||||||
|
createDecryptedPayload(outputFile)
|
||||||
|
outputFile.close()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Set the command to execute by the programm according to cmdLine parameters
|
||||||
|
#
|
||||||
|
|
||||||
|
def setCommand(name) :
|
||||||
|
global command
|
||||||
|
if command != "" :
|
||||||
|
raise CMBDTCFatal("Invalid command line parameters")
|
||||||
|
else :
|
||||||
|
command = name
|
||||||
|
|
||||||
|
#
|
||||||
|
# Program usage
|
||||||
|
#
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print("\nUsage:")
|
||||||
|
print("\nCMBDTC.py [options] bookFileName\n")
|
||||||
|
print("-p Adds a PID to the list of PIDs that are tried to decrypt the book key (can be used several times)")
|
||||||
|
print("-d Saves a decrypted copy of the book")
|
||||||
|
print("-r Prints or writes to disk a record indicated in the form name:index (e.g \"img:0\")")
|
||||||
|
print("-o Output file name to write records and decrypted books")
|
||||||
|
print("-v Verbose (can be used several times)")
|
||||||
|
print("-i Prints kindle.info database")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
global kindleDatabase
|
||||||
|
global bookMetadata
|
||||||
|
global bookKey
|
||||||
|
global bookFile
|
||||||
|
global command
|
||||||
|
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
|
verbose = 0
|
||||||
|
recordName = ""
|
||||||
|
recordIndex = 0
|
||||||
|
outputFile = ""
|
||||||
|
PIDs = []
|
||||||
|
kindleDatabase = None
|
||||||
|
command = ""
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "vdir:o:p:")
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
# print help information and exit:
|
||||||
|
print str(err) # will print something like "option -a not recognized"
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-v":
|
||||||
|
verbose+=1
|
||||||
|
if o == "-i":
|
||||||
|
setCommand("printInfo")
|
||||||
|
if o =="-o":
|
||||||
|
if a == None :
|
||||||
|
raise CMBDTCFatal("Invalid parameter for -o")
|
||||||
|
outputFile = a
|
||||||
|
if o =="-r":
|
||||||
|
setCommand("printRecord")
|
||||||
|
try:
|
||||||
|
recordName,recordIndex = a.split(':')
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Invalid parameter for -r")
|
||||||
|
if o =="-p":
|
||||||
|
PIDs.append(a)
|
||||||
|
if o =="-d":
|
||||||
|
setCommand("doit")
|
||||||
|
|
||||||
|
if command == "" :
|
||||||
|
raise CMBDTCFatal("No action supplied on command line")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read the encrypted database
|
||||||
|
#
|
||||||
|
|
||||||
|
try:
|
||||||
|
kindleDatabase = parseKindleInfo()
|
||||||
|
except Exception, message:
|
||||||
|
if verbose>0:
|
||||||
|
print(message)
|
||||||
|
|
||||||
|
if kindleDatabase != None :
|
||||||
|
if command == "printInfo" :
|
||||||
|
printKindleInfo()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute the DSN
|
||||||
|
#
|
||||||
|
|
||||||
|
# Get the Mazama Random number
|
||||||
|
MazamaRandomNumber = getKindleInfoValueForKey("MazamaRandomNumber")
|
||||||
|
|
||||||
|
# Get the HDD serial
|
||||||
|
encodedSystemVolumeSerialNumber = encodeHash(str(GetVolumeSerialNumber(GetSystemDirectory().split('\\')[0] + '\\')),charMap1)
|
||||||
|
|
||||||
|
# Get the current user name
|
||||||
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
|
# concat, hash and encode
|
||||||
|
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
||||||
|
|
||||||
|
if verbose >1:
|
||||||
|
print("DSN: " + DSN)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
table = generatePidEncryptionTable()
|
||||||
|
devicePID = generateDevicePID(table,DSN,4)
|
||||||
|
PIDs.append(devicePID)
|
||||||
|
|
||||||
|
if verbose > 0:
|
||||||
|
print("Device PID: " + devicePID)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Open book and parse metadata
|
||||||
|
#
|
||||||
|
|
||||||
|
if len(args) == 1:
|
||||||
|
|
||||||
|
bookFile = openBook(args[0])
|
||||||
|
parseTopazHeader()
|
||||||
|
parseMetadata()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute book PID
|
||||||
|
#
|
||||||
|
|
||||||
|
# Get the account token
|
||||||
|
|
||||||
|
if kindleDatabase != None:
|
||||||
|
kindleAccountToken = getKindleInfoValueForKey("kindle.account.tokens")
|
||||||
|
|
||||||
|
if verbose >1:
|
||||||
|
print("Account Token: " + kindleAccountToken)
|
||||||
|
|
||||||
|
keysRecord = bookMetadata["keys"]
|
||||||
|
keysRecordRecord = bookMetadata[keysRecord]
|
||||||
|
|
||||||
|
pidHash = SHA1(DSN+kindleAccountToken+keysRecord+keysRecordRecord)
|
||||||
|
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
PIDs.append(bookPID)
|
||||||
|
|
||||||
|
if verbose > 0:
|
||||||
|
print ("Book PID: " + bookPID )
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt book key
|
||||||
|
#
|
||||||
|
|
||||||
|
dkey = getBookPayloadRecord('dkey', 0)
|
||||||
|
|
||||||
|
bookKeys = []
|
||||||
|
for PID in PIDs :
|
||||||
|
bookKeys+=decryptDkeyRecords(dkey,PID)
|
||||||
|
|
||||||
|
if len(bookKeys) == 0 :
|
||||||
|
if verbose > 0 :
|
||||||
|
print ("Book key could not be found. Maybe this book is not registered with this device.")
|
||||||
|
else :
|
||||||
|
bookKey = bookKeys[0]
|
||||||
|
if verbose > 0:
|
||||||
|
print("Book key: " + bookKey.encode('hex'))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if command == "printRecord" :
|
||||||
|
extractBookPayloadRecord(recordName,int(recordIndex),outputFile)
|
||||||
|
if outputFile != "" and verbose>0 :
|
||||||
|
print("Wrote record to file: "+outputFile)
|
||||||
|
elif command == "doit" :
|
||||||
|
if outputFile!="" :
|
||||||
|
createDecryptedBook(outputFile)
|
||||||
|
if verbose >0 :
|
||||||
|
print ("Decrypted book saved. Don't pirate!")
|
||||||
|
elif verbose > 0:
|
||||||
|
print("Output file name was not supplied.")
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main())
|
||||||
@@ -20,6 +20,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# Get a 7 bit encoded number from string. The most
|
# Get a 7 bit encoded number from string. The most
|
||||||
# significant byte comes first and has the high bit (8th) set
|
# significant byte comes first and has the high bit (8th) set
|
||||||
@@ -138,7 +140,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
@@ -243,6 +246,7 @@ class PageParser(object):
|
|||||||
'region.y' : (1, 'scalar_number', 0, 0),
|
'region.y' : (1, 'scalar_number', 0, 0),
|
||||||
'region.h' : (1, 'scalar_number', 0, 0),
|
'region.h' : (1, 'scalar_number', 0, 0),
|
||||||
'region.w' : (1, 'scalar_number', 0, 0),
|
'region.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.orientation' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'empty_text_region' : (1, 'snippets', 1, 0),
|
'empty_text_region' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
@@ -258,6 +262,13 @@ class PageParser(object):
|
|||||||
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
'word_semantic' : (1, 'snippets', 1, 1),
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -272,11 +283,21 @@ class PageParser(object):
|
|||||||
|
|
||||||
'_span' : (1, 'snippets', 1, 0),
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'-span.lastWord' : (1, 'scalar_number', 0, 0),
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'span' : (1, 'snippets', 1, 0),
|
'span' : (1, 'snippets', 1, 0),
|
||||||
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'extratokens' : (1, 'snippets', 1, 0),
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -595,28 +616,30 @@ class PageParser(object):
|
|||||||
nodename = fullpathname.pop()
|
nodename = fullpathname.pop()
|
||||||
ilvl = len(fullpathname)
|
ilvl = len(fullpathname)
|
||||||
indent = ' ' * (3 * ilvl)
|
indent = ' ' * (3 * ilvl)
|
||||||
result = indent + '<' + nodename + '>'
|
rlst = []
|
||||||
|
rlst.append(indent + '<' + nodename + '>')
|
||||||
if len(argList) > 0:
|
if len(argList) > 0:
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + ','
|
alst.append(str(j) + ',')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += 'snippets:' + argres
|
rlst.append('snippets:' + argres)
|
||||||
else :
|
else :
|
||||||
result += argres
|
rlst.append(argres)
|
||||||
if len(subtagList) > 0 :
|
if len(subtagList) > 0 :
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
result += indent + '</' + nodename + '>\n'
|
rlst.append(indent + '</' + nodename + '>\n')
|
||||||
else:
|
else:
|
||||||
result += '</' + nodename + '>\n'
|
rlst.append('</' + nodename + '>\n')
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# flatten tag
|
# flatten tag
|
||||||
@@ -625,35 +648,38 @@ class PageParser(object):
|
|||||||
subtagList = node[1]
|
subtagList = node[1]
|
||||||
argtype = node[2]
|
argtype = node[2]
|
||||||
argList = node[3]
|
argList = node[3]
|
||||||
result = name
|
rlst = []
|
||||||
|
rlst.append(name)
|
||||||
if (len(argList) > 0):
|
if (len(argList) > 0):
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + '|'
|
alst.append(str(j) + '|')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += '.snippets=' + argres
|
rlst.append('.snippets=' + argres)
|
||||||
else :
|
else :
|
||||||
result += '=' + argres
|
rlst.append('=' + argres)
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# reduce create xml output
|
# reduce create xml output
|
||||||
def formatDoc(self, flat_xml):
|
def formatDoc(self, flat_xml):
|
||||||
result = ''
|
rlst = []
|
||||||
for j in self.doc :
|
for j in self.doc :
|
||||||
if len(j) > 0:
|
if len(j) > 0:
|
||||||
if flat_xml:
|
if flat_xml:
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
else:
|
else:
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
|
result = "".join(rlst)
|
||||||
if self.debug : print result
|
if self.debug : print result
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|||||||
@@ -271,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -280,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -288,6 +292,11 @@ class DocParser(object):
|
|||||||
if self.fixedimage :
|
if self.fixedimage :
|
||||||
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
||||||
|
|
||||||
|
# before creating an image make sure glyph info exists
|
||||||
|
gidList = self.getData('info.glyph.glyphID',0,-1)
|
||||||
|
|
||||||
|
makeImage = makeImage & (len(gidList) > 0)
|
||||||
|
|
||||||
if not makeImage :
|
if not makeImage :
|
||||||
# standard all word paragraph
|
# standard all word paragraph
|
||||||
for wordnum in xrange(first, last):
|
for wordnum in xrange(first, last):
|
||||||
@@ -353,6 +362,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -512,13 +523,80 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
htmlpage = ''
|
tocinfo = ''
|
||||||
|
hlst = []
|
||||||
|
|
||||||
# get the ocr text
|
# get the ocr text
|
||||||
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
||||||
@@ -575,8 +653,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
# set anchor for link target on this page
|
# set anchor for link target on this page
|
||||||
if not anchorSet and not first_para_continued:
|
if not anchorSet and not first_para_continued:
|
||||||
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="'
|
hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
|
||||||
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n'
|
hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
|
||||||
anchorSet = True
|
anchorSet = True
|
||||||
|
|
||||||
# handle groups of graphics with text captions
|
# handle groups of graphics with text captions
|
||||||
@@ -585,12 +663,12 @@ class DocParser(object):
|
|||||||
if grptype != None:
|
if grptype != None:
|
||||||
if grptype == 'graphic':
|
if grptype == 'graphic':
|
||||||
gcstr = ' class="' + grptype + '"'
|
gcstr = ' class="' + grptype + '"'
|
||||||
htmlpage += '<div' + gcstr + '>'
|
hlst.append('<div' + gcstr + '>')
|
||||||
inGroup = True
|
inGroup = True
|
||||||
|
|
||||||
elif (etype == 'grpend'):
|
elif (etype == 'grpend'):
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '</div>\n'
|
hlst.append('</div>\n')
|
||||||
inGroup = False
|
inGroup = False
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -600,25 +678,25 @@ class DocParser(object):
|
|||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc)
|
hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
|
||||||
else:
|
else:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
elif regtype == 'chapterheading' :
|
elif regtype == 'chapterheading' :
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
if not breakSet:
|
if not breakSet:
|
||||||
htmlpage += '<div style="page-break-after: always;"> </div>\n'
|
hlst.append('<div style="page-break-after: always;"> </div>\n')
|
||||||
breakSet = True
|
breakSet = True
|
||||||
tag = 'h1'
|
tag = 'h1'
|
||||||
if pclass and (len(pclass) >= 7):
|
if pclass and (len(pclass) >= 7):
|
||||||
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
||||||
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
||||||
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
else:
|
else:
|
||||||
htmlpage += '<' + tag + '>'
|
hlst.append('<' + tag + '>')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
|
|
||||||
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -632,11 +710,11 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'tocentry') :
|
elif (regtype == 'tocentry') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -644,8 +722,8 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
tocinfo += self.buildTOCEntry(pdesc)
|
||||||
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'vertical') or (regtype == 'table') :
|
elif (regtype == 'vertical') or (regtype == 'table') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -655,13 +733,13 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
|
|
||||||
elif (regtype == 'synth_fcvr.center'):
|
elif (regtype == 'synth_fcvr.center'):
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
else :
|
else :
|
||||||
print ' Making region type', regtype,
|
print ' Making region type', regtype,
|
||||||
@@ -687,29 +765,29 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
else :
|
else :
|
||||||
print ' a "graphic" region'
|
print ' a "graphic" region'
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
|
|
||||||
|
htmlpage = "".join(hlst)
|
||||||
if last_para_continued :
|
if last_para_continued :
|
||||||
if htmlpage[-4:] == '</p>':
|
if htmlpage[-4:] == '</p>':
|
||||||
htmlpage = htmlpage[0:-4]
|
htmlpage = htmlpage[0:-4]
|
||||||
last_para_continued = False
|
last_para_continued = False
|
||||||
|
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
||||||
htmlpage = dp.process()
|
htmlpage, tocinfo = dp.process()
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|||||||
@@ -10,17 +10,94 @@ from struct import unpack
|
|||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
class PParser(object):
|
||||||
def __init__(self, gd, flatxml):
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
self.gd = gd
|
self.gd = gd
|
||||||
self.flatdoc = flatxml.split('\n')
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
self.temp = []
|
self.temp = []
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
self.ph = -1
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
self.pw = -1
|
||||||
self.pw = foo[0]
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
self.gx = self.getData('info.glyph.x')
|
for p in startpos:
|
||||||
self.gy = self.getData('info.glyph.y')
|
(name, argres) = self.lineinDoc(p)
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
self.ph = max(self.ph, int(argres))
|
||||||
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.pw = max(self.pw, int(argres))
|
||||||
|
|
||||||
|
if self.ph <= 0:
|
||||||
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
|
if self.pw <= 0:
|
||||||
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gx = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gy = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gid = res
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
|
# find tag in doc if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
|
else:
|
||||||
|
end = min(self.docSize, end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
|
||||||
|
# return list of start positions for the tagpath
|
||||||
|
def posinDoc(self, tagpath):
|
||||||
|
startpos = []
|
||||||
|
pos = 0
|
||||||
|
res = ""
|
||||||
|
while res != None :
|
||||||
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
|
if res != None :
|
||||||
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
def getData(self, path):
|
def getData(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.flatdoc)
|
cnt = len(self.flatdoc)
|
||||||
@@ -39,6 +116,23 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def getDataatPos(self, path, pos):
|
||||||
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
def getDataTemp(self, path):
|
def getDataTemp(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.temp)
|
cnt = len(self.temp)
|
||||||
@@ -58,6 +152,7 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getImages(self):
|
def getImages(self):
|
||||||
result = []
|
result = []
|
||||||
self.temp = self.flatdoc
|
self.temp = self.flatdoc
|
||||||
@@ -69,6 +164,7 @@ class PParser(object):
|
|||||||
src = self.getDataTemp('img.src')[0]
|
src = self.getDataTemp('img.src')[0]
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getGlyphs(self):
|
def getGlyphs(self):
|
||||||
result = []
|
result = []
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
@@ -84,68 +180,70 @@ class PParser(object):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
ml = ''
|
mlst = []
|
||||||
pp = PParser(gdict, flat_xml)
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
||||||
if (raw):
|
if (raw):
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
else:
|
else:
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
ml += '<script><![CDATA[\n'
|
mlst.append('<script><![CDATA[\n')
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
mlst.append('var dpi=%d;\n' % scaledpi)
|
||||||
if (counter) :
|
if (previd) :
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
||||||
if (counter < numfiles-1) :
|
if (nextid) :
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
||||||
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
|
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
||||||
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
|
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
||||||
ml += 'window.onload=setsize;\n'
|
mlst.append('window.onload=setsize;\n')
|
||||||
ml += ']]></script>\n'
|
mlst.append(']]></script>\n')
|
||||||
ml += '</head>\n'
|
mlst.append('</head>\n')
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
mlst.append('<div style="white-space:nowrap;">\n')
|
||||||
if (counter == 0) :
|
if previd == None:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else:
|
else:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
ml += '<defs>\n'
|
mlst.append('<defs>\n')
|
||||||
gdefs = pp.getGlyphs()
|
gdefs = pp.getGlyphs()
|
||||||
for j in xrange(0,len(gdefs)):
|
for j in xrange(0,len(gdefs)):
|
||||||
ml += gdefs[j]
|
mlst.append(gdefs[j])
|
||||||
ml += '</defs>\n'
|
mlst.append('</defs>\n')
|
||||||
img = pp.getImages()
|
img = pp.getImages()
|
||||||
if (img != None):
|
if (img != None):
|
||||||
for j in xrange(0,len(img)):
|
for j in xrange(0,len(img)):
|
||||||
ml += img[j]
|
mlst.append(img[j])
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
for j in xrange(0,len(pp.gid)):
|
for j in xrange(0,len(pp.gid)):
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
|
||||||
if (raw) :
|
if (raw) :
|
||||||
ml += '</svg>'
|
mlst.append('</svg>')
|
||||||
else :
|
else :
|
||||||
ml += '</svg></a>\n'
|
mlst.append('</svg></a>\n')
|
||||||
if (counter == numfiles - 1) :
|
if nextid == None:
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else :
|
else :
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '</div>\n'
|
mlst.append('</div>\n')
|
||||||
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
|
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
|
||||||
ml += '</body>\n'
|
mlst.append('</body>\n')
|
||||||
ml += '</html>\n'
|
mlst.append('</html>\n')
|
||||||
return ml
|
return "".join(mlst)
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
if 'calibre' in sys.modules:
|
if 'calibre' in sys.modules:
|
||||||
@@ -37,6 +39,8 @@ else :
|
|||||||
import flatxml2svg
|
import flatxml2svg
|
||||||
import stylexml2css
|
import stylexml2css
|
||||||
|
|
||||||
|
# global switch
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
# Get a 7 bit encoded number from a file
|
# Get a 7 bit encoded number from a file
|
||||||
def readEncodedNumber(file):
|
def readEncodedNumber(file):
|
||||||
@@ -114,7 +118,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
def getPos(self):
|
def getPos(self):
|
||||||
@@ -295,6 +300,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
if not os.path.exists(svgDir) :
|
if not os.path.exists(svgDir) :
|
||||||
os.makedirs(svgDir)
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xmlDir = os.path.join(bookDir,'xml')
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
if not os.path.exists(xmlDir) :
|
if not os.path.exists(xmlDir) :
|
||||||
os.makedirs(xmlDir)
|
os.makedirs(xmlDir)
|
||||||
@@ -345,23 +351,38 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
authors = authors.replace('>','>')
|
authors = authors.replace('>','>')
|
||||||
meta_array['Authors'] = authors
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'metadata.xml')
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
metastr = ''
|
mlst = []
|
||||||
for key in meta_array:
|
for key in meta_array:
|
||||||
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n'
|
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
|
||||||
|
metastr = "".join(mlst)
|
||||||
|
mlst = None
|
||||||
file(xname, 'wb').write(metastr)
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
print 'Processing StyleSheet'
|
print 'Processing StyleSheet'
|
||||||
|
|
||||||
# get some scaling info from metadata to use while processing styles
|
# get some scaling info from metadata to use while processing styles
|
||||||
|
# and first page info
|
||||||
|
|
||||||
fontsize = '135'
|
fontsize = '135'
|
||||||
if 'fontSize' in meta_array:
|
if 'fontSize' in meta_array:
|
||||||
fontsize = meta_array['fontSize']
|
fontsize = meta_array['fontSize']
|
||||||
|
|
||||||
# also get the size of a normal text page
|
# also get the size of a normal text page
|
||||||
|
# get the total number of pages unpacked as a safety check
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
spage = '1'
|
spage = '1'
|
||||||
if 'firstTextPage' in meta_array:
|
if 'firstTextPage' in meta_array:
|
||||||
spage = meta_array['firstTextPage']
|
spage = meta_array['firstTextPage']
|
||||||
pnum = int(spage)
|
pnum = int(spage)
|
||||||
|
if pnum >= numfiles or pnum < 0:
|
||||||
|
# metadata is wrong so just select a page near the front
|
||||||
|
# 10% of the book to get a normal text page
|
||||||
|
pnum = int(0.10 * numfiles)
|
||||||
|
# print "first normal text page is", spage
|
||||||
|
|
||||||
# get page height and width from first text page for use in stylesheet scaling
|
# get page height and width from first text page for use in stylesheet scaling
|
||||||
pname = 'page%04d.dat' % (pnum + 1)
|
pname = 'page%04d.dat' % (pnum + 1)
|
||||||
@@ -371,12 +392,37 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
(ph, pw) = getPageDim(flat_xml)
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
if (ph == '-1') or (ph == '0') : ph = '11000'
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
if (pw == '-1') or (pw == '0') : pw = '8500'
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
# print ' ', 'other0000.dat'
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
xname = os.path.join(bookDir, 'style.css')
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
flat_xml = convert2xml.fromData(dict, otherFile)
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
file(xname, 'wb').write(cssstr)
|
file(xname, 'wb').write(cssstr)
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'other0000.xml')
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
@@ -398,6 +444,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
fname = os.path.join(glyphsDir,filename)
|
fname = os.path.join(glyphsDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
@@ -414,108 +461,188 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
glyfile.close()
|
glyfile.close()
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
|
||||||
# start up the html
|
# start up the html
|
||||||
|
# also build up tocentries while processing html
|
||||||
htmlFileName = "book.html"
|
htmlFileName = "book.html"
|
||||||
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
hlst = []
|
||||||
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n'
|
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
|
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
|
||||||
htmlstr += '<head>\n'
|
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
|
||||||
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
|
hlst.append('<head>\n')
|
||||||
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
|
||||||
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
|
||||||
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
|
||||||
htmlstr += '</head>\n<body>\n'
|
hlst.append('</head>\n<body>\n')
|
||||||
|
|
||||||
print 'Processing Pages'
|
print 'Processing Pages'
|
||||||
# Books are at 1440 DPI. This is rendering at twice that size for
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
# readability when rendering to the screen.
|
# readability when rendering to the screen.
|
||||||
scaledpi = 1440.0
|
scaledpi = 1440.0
|
||||||
|
|
||||||
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
|
||||||
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
|
||||||
svgindex += '<head>\n'
|
|
||||||
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
|
||||||
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
|
||||||
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
|
||||||
if 'ASIN' in meta_array:
|
|
||||||
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
|
||||||
if 'GUID' in meta_array:
|
|
||||||
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
|
||||||
svgindex += '</head>\n'
|
|
||||||
svgindex += '<body>\n'
|
|
||||||
|
|
||||||
filenames = os.listdir(pageDir)
|
filenames = os.listdir(pageDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
numfiles = len(filenames)
|
numfiles = len(filenames)
|
||||||
counter = 0
|
|
||||||
|
xmllst = []
|
||||||
|
elst = []
|
||||||
|
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
# print ' ', filename
|
# print ' ', filename
|
||||||
print ".",
|
print ".",
|
||||||
|
|
||||||
fname = os.path.join(pageDir,filename)
|
fname = os.path.join(pageDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
# first get the html
|
# first get the html
|
||||||
htmlstr += flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
elst.append(tocinfo)
|
||||||
|
hlst.append(pagehtml)
|
||||||
|
|
||||||
# now get the svg image of the page
|
# finish up the html string and output it
|
||||||
svgxml = flatxml2svg.convert2SVG(gd, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi)
|
hlst.append('</body>\n</html>\n')
|
||||||
|
htmlstr = "".join(hlst)
|
||||||
|
hlst = None
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tlst = []
|
||||||
|
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
tlst.append('<head>\n')
|
||||||
|
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
tlst.append('</head>\n')
|
||||||
|
tlst.append('<body>\n')
|
||||||
|
|
||||||
|
tlst.append('<h2>Table of Contents</h2>\n')
|
||||||
|
start = pageidnums[0]
|
||||||
if (raw):
|
if (raw):
|
||||||
pfile = open(os.path.join(svgDir,filename.replace('.dat','.svg')), 'w')
|
startname = 'page%04d.svg' % start
|
||||||
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (counter, counter)
|
|
||||||
else:
|
else:
|
||||||
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % counter), 'w')
|
startname = 'page%04d.xhtml' % start
|
||||||
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (counter, counter)
|
|
||||||
|
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
tocentries = "".join(elst)
|
||||||
|
elst = None
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
|
||||||
|
tlst.append('</body>\n')
|
||||||
|
tlst.append('</html>\n')
|
||||||
|
tochtml = "".join(tlst)
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
slst = []
|
||||||
|
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
slst.append('<head>\n')
|
||||||
|
slst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
slst.append('</head>\n')
|
||||||
|
slst.append('<body>\n')
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
slst.append('<h2>List of Pages</h2>\n')
|
||||||
|
slst.append('<div>\n')
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flst = []
|
||||||
|
for page in pagelst:
|
||||||
|
flst.append(xmllst[page])
|
||||||
|
flat_svg = "".join(flst)
|
||||||
|
flst=None
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
|
if (raw) :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
|
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
else :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
|
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
previd = pageid
|
||||||
pfile.write(svgxml)
|
pfile.write(svgxml)
|
||||||
pfile.close()
|
pfile.close()
|
||||||
|
|
||||||
counter += 1
|
counter += 1
|
||||||
|
slst.append('</div>\n')
|
||||||
|
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
|
||||||
|
slst.append('</body>\n</html>\n')
|
||||||
|
svgindex = "".join(slst)
|
||||||
|
slst = None
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
# finish up the html string and output it
|
|
||||||
htmlstr += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
|
||||||
|
|
||||||
# finish up the svg index string and output it
|
|
||||||
svgindex += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
|
||||||
|
|
||||||
# build the opf file
|
# build the opf file
|
||||||
opfname = os.path.join(bookDir, 'book.opf')
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
olst = []
|
||||||
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
|
||||||
# adding metadata
|
# adding metadata
|
||||||
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
|
||||||
if 'oASIN' in meta_array:
|
if 'oASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
|
||||||
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
|
||||||
opfstr += ' <dc:language>en</dc:language>\n'
|
olst.append(' <dc:language>en</dc:language>\n')
|
||||||
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n'
|
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <meta name="cover" content="bookcover"/>\n'
|
olst.append(' <meta name="cover" content="bookcover"/>\n')
|
||||||
opfstr += ' </metadata>\n'
|
olst.append(' </metadata>\n')
|
||||||
opfstr += '<manifest>\n'
|
olst.append('<manifest>\n')
|
||||||
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
|
||||||
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n'
|
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
|
||||||
# adding image files to manifest
|
# adding image files to manifest
|
||||||
filenames = os.listdir(imgDir)
|
filenames = os.listdir(imgDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
@@ -525,17 +652,19 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
imgext = 'jpeg'
|
imgext = 'jpeg'
|
||||||
if imgext == '.svg':
|
if imgext == '.svg':
|
||||||
imgext = 'svg+xml'
|
imgext = 'svg+xml'
|
||||||
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n'
|
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n'
|
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
|
||||||
opfstr += '</manifest>\n'
|
olst.append('</manifest>\n')
|
||||||
# adding spine
|
# adding spine
|
||||||
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n'
|
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <guide>\n'
|
olst.append(' <guide>\n')
|
||||||
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n'
|
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
|
||||||
opfstr += ' </guide>\n'
|
olst.append(' </guide>\n')
|
||||||
opfstr += '</package>\n'
|
olst.append('</package>\n')
|
||||||
|
opfstr = "".join(olst)
|
||||||
|
olst = None
|
||||||
file(opfname, 'wb').write(opfstr)
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
print 'Processing Complete'
|
print 'Processing Complete'
|
||||||
@@ -556,7 +685,6 @@ def usage():
|
|||||||
|
|
||||||
def main(argv):
|
def main(argv):
|
||||||
bookDir = ''
|
bookDir = ''
|
||||||
|
|
||||||
if len(argv) == 0:
|
if len(argv) == 0:
|
||||||
argv = sys.argv
|
argv = sys.argv
|
||||||
|
|
||||||
@@ -573,7 +701,7 @@ def main(argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
raw = 0
|
raw = 0
|
||||||
fixedimage = False
|
fixedimage = True
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o =="-h":
|
if o =="-h":
|
||||||
usage()
|
usage()
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ from __future__ import with_statement
|
|||||||
# and many many others
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
__version__ = '3.1'
|
__version__ = '4.2'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -32,6 +32,9 @@ import sys
|
|||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import string
|
import string
|
||||||
import re
|
import re
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -74,9 +77,11 @@ def cleanup_name(name):
|
|||||||
return one
|
return one
|
||||||
|
|
||||||
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
global buildXML
|
||||||
|
|
||||||
# handle the obvious cases at the beginning
|
# handle the obvious cases at the beginning
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
print "Error: Input file does not exist"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
mobi = True
|
mobi = True
|
||||||
@@ -95,8 +100,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
print "Processing Book: ", title
|
print "Processing Book: ", title
|
||||||
filenametitle = cleanup_name(title)
|
filenametitle = cleanup_name(title)
|
||||||
outfilename = bookname
|
outfilename = bookname
|
||||||
if len(bookname)>4 and len(filenametitle)>4 and bookname[:4] != filenametitle[:4]:
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
outfilename = outfilename + "_" + filenametitle
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
# build pid list
|
# build pid list
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
@@ -106,16 +117,21 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
mb.processBook(pidlst)
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
except mobidedrm.DrmException, e:
|
except mobidedrm.DrmException, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except topazextract.TpzDRMError, e:
|
except topazextract.TpzDRMError, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if mobi:
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
elif mb.getMobiVersion() >= 8:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw3')
|
||||||
|
else:
|
||||||
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
mb.getMobiFile(outfile)
|
mb.getMobiFile(outfile)
|
||||||
return 0
|
return 0
|
||||||
@@ -125,10 +141,11 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
mb.getHTMLZip(zipname)
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG HTMLZ Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
mb.getSVGZip(zipname)
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
mb.getXMLZip(zipname)
|
mb.getXMLZip(zipname)
|
||||||
@@ -158,7 +175,6 @@ def main(argv=sys.argv):
|
|||||||
print ('K4MobiDeDrm v%(__version__)s '
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
print ' '
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -196,4 +212,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -22,16 +22,16 @@ else:
|
|||||||
|
|
||||||
if inCalibre:
|
if inCalibre:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
else:
|
else:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
@@ -218,14 +218,14 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
|
|||||||
print "Keys not found in " + kInfoFile
|
print "Keys not found in " + kInfoFile
|
||||||
return pidlst
|
return pidlst
|
||||||
|
|
||||||
# Get the HDD serial
|
# Get the ID string used
|
||||||
encodedSystemVolumeSerialNumber = encodeHash(GetVolumeSerialNumber(),charMap1)
|
encodedIDString = encodeHash(GetIDString(),charMap1)
|
||||||
|
|
||||||
# Get the current user name
|
# Get the current user name
|
||||||
encodedUsername = encodeHash(GetUserName(),charMap1)
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
# concat, hash and encode to calculate the DSN
|
# concat, hash and encode to calculate the DSN
|
||||||
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
|
||||||
|
|
||||||
# Compute the device PID (for which I can tell, is used for nothing).
|
# Compute the device PID (for which I can tell, is used for nothing).
|
||||||
table = generatePidEncryptionTable()
|
table = generatePidEncryptionTable()
|
||||||
|
|||||||
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto.dylib
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto.dylib
Normal file
Binary file not shown.
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto32.so
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto32.so
Normal file
Binary file not shown.
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto64.so
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/libalfcrypto64.so
Normal file
Binary file not shown.
68
Calibre_Plugins/K4MobiDeDRM_plugin/pbkdf2.py
Normal file
68
Calibre_Plugins/K4MobiDeDRM_plugin/pbkdf2.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
|
||||||
|
# for details. Basically, it derives a key from a password and salt.
|
||||||
|
|
||||||
|
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
|
||||||
|
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
|
||||||
|
# This code may be freely used and modified for any purpose.
|
||||||
|
|
||||||
|
# Revision history
|
||||||
|
# v0.1 October 2004 - Initial release
|
||||||
|
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
|
||||||
|
# v0.3 "" the correct digest_size rather than always 20
|
||||||
|
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
try:
|
||||||
|
# only in python 2.5
|
||||||
|
import hashlib
|
||||||
|
sha = hashlib.sha1
|
||||||
|
md5 = hashlib.md5
|
||||||
|
sha256 = hashlib.sha256
|
||||||
|
except ImportError: # pragma: NO COVERAGE
|
||||||
|
# fallback
|
||||||
|
import sha
|
||||||
|
import md5
|
||||||
|
|
||||||
|
# this is what you want to call.
|
||||||
|
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
|
||||||
|
try:
|
||||||
|
# depending whether the hashfn is from hashlib or sha/md5
|
||||||
|
digest_size = hashfn().digest_size
|
||||||
|
except TypeError: # pragma: NO COVERAGE
|
||||||
|
digest_size = hashfn.digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
|
||||||
|
h = hmac.new( password, None, hashfn )
|
||||||
|
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, itercount, i )
|
||||||
|
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise ValueError("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
# Helper as per the spec. h is a hmac which has been created seeded with the
|
||||||
|
# password, it will be copy()ed and not modified.
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
|
||||||
|
return T
|
||||||
@@ -6,6 +6,7 @@ import csv
|
|||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import getopt
|
import getopt
|
||||||
|
import re
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
@@ -81,6 +82,21 @@ class DocParser(object):
|
|||||||
pos = foundpos + 1
|
pos = foundpos + 1
|
||||||
return startpos
|
return startpos
|
||||||
|
|
||||||
|
# returns a vector of integers for the tagpath
|
||||||
|
def getData(self, tagpath, pos, end, clean=False):
|
||||||
|
if clean:
|
||||||
|
digits_only = re.compile(r'''([0-9]+)''')
|
||||||
|
argres=[]
|
||||||
|
(foundat, argt) = self.findinDoc(tagpath, pos, end)
|
||||||
|
if (argt != None) and (len(argt) > 0) :
|
||||||
|
argList = argt.split('|')
|
||||||
|
for strval in argList:
|
||||||
|
if clean:
|
||||||
|
m = re.search(digits_only, strval)
|
||||||
|
if m != None:
|
||||||
|
strval = m.group()
|
||||||
|
argres.append(int(strval))
|
||||||
|
return argres
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
@@ -237,7 +253,11 @@ def convert2CSS(flatxml, fontsize, ph, pw):
|
|||||||
|
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, fontsize, ph, pw)
|
dp = DocParser(flatxml, fontsize, ph, pw)
|
||||||
|
|
||||||
csspage = dp.process()
|
csspage = dp.process()
|
||||||
|
|
||||||
return csspage
|
return csspage
|
||||||
|
|
||||||
|
|
||||||
|
def getpageIDMap(flatxml):
|
||||||
|
dp = DocParser(flatxml, 0, 0, 0)
|
||||||
|
pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
|
||||||
|
return pageidnumbers
|
||||||
|
|||||||
@@ -146,4 +146,3 @@ class Process(object):
|
|||||||
self.__quit = True
|
self.__quit = True
|
||||||
self.__inputsem.release()
|
self.__inputsem.release()
|
||||||
self.__lock.release()
|
self.__lock.release()
|
||||||
|
|
||||||
@@ -16,10 +16,13 @@ if 'calibre' in sys.modules:
|
|||||||
else:
|
else:
|
||||||
inCalibre = False
|
inCalibre = False
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import zlib, zipfile, tempfile, shutil
|
import zlib, zipfile, tempfile, shutil
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
from alfcrypto import Topaz_Cipher
|
||||||
|
|
||||||
class TpzDRMError(Exception):
|
class TpzDRMError(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,25 +84,28 @@ def bookReadString(fo):
|
|||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
# Context initialisation for the Topaz Crypto
|
||||||
def topazCryptoInit(key):
|
def topazCryptoInit(key):
|
||||||
ctx1 = 0x0CAFFE19E
|
return Topaz_Cipher().ctx_init(key)
|
||||||
for keyChar in key:
|
|
||||||
keyByte = ord(keyChar)
|
# ctx1 = 0x0CAFFE19E
|
||||||
ctx2 = ctx1
|
# for keyChar in key:
|
||||||
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
# keyByte = ord(keyChar)
|
||||||
return [ctx1,ctx2]
|
# ctx2 = ctx1
|
||||||
|
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
# return [ctx1,ctx2]
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
def topazCryptoDecrypt(data, ctx):
|
def topazCryptoDecrypt(data, ctx):
|
||||||
ctx1 = ctx[0]
|
return Topaz_Cipher().decrypt(data, ctx)
|
||||||
ctx2 = ctx[1]
|
# ctx1 = ctx[0]
|
||||||
plainText = ""
|
# ctx2 = ctx[1]
|
||||||
for dataChar in data:
|
# plainText = ""
|
||||||
dataByte = ord(dataChar)
|
# for dataChar in data:
|
||||||
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
# dataByte = ord(dataChar)
|
||||||
ctx2 = ctx1
|
# m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
# ctx2 = ctx1
|
||||||
plainText += chr(m)
|
# ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
return plainText
|
# plainText += chr(m)
|
||||||
|
# return plainText
|
||||||
|
|
||||||
# Decrypt data with the PID
|
# Decrypt data with the PID
|
||||||
def decryptRecord(data,PID):
|
def decryptRecord(data,PID):
|
||||||
@@ -140,6 +146,7 @@ class TopazBook:
|
|||||||
def __init__(self, filename):
|
def __init__(self, filename):
|
||||||
self.fo = file(filename, 'rb')
|
self.fo = file(filename, 'rb')
|
||||||
self.outdir = tempfile.mkdtemp()
|
self.outdir = tempfile.mkdtemp()
|
||||||
|
# self.outdir = 'rawdat'
|
||||||
self.bookPayloadOffset = 0
|
self.bookPayloadOffset = 0
|
||||||
self.bookHeaderRecords = {}
|
self.bookHeaderRecords = {}
|
||||||
self.bookMetadata = {}
|
self.bookMetadata = {}
|
||||||
@@ -380,6 +387,7 @@ def usage(progname):
|
|||||||
|
|
||||||
# Main
|
# Main
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
|
global buildXML
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
k4 = False
|
k4 = False
|
||||||
pids = []
|
pids = []
|
||||||
@@ -438,9 +446,10 @@ def main(argv=sys.argv):
|
|||||||
tb.getHTMLZip(zipname)
|
tb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
||||||
tb.getSVGZip(zipname)
|
tb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
||||||
tb.getXMLZip(zipname)
|
tb.getXMLZip(zipname)
|
||||||
@@ -450,12 +459,12 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
except TpzDRMError, e:
|
except TpzDRMError, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup()
|
# tb.cleanup()
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup
|
# tb.cleanup
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
@@ -464,4 +473,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected wi
|
|||||||
|
|
||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python, PyCrypto and/or OpenSSL already installed, but they aren't necessary.
|
This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python, PyCrypto and/or OpenSSL already installed, but they aren't necessary.
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ This plugin supersedes MobiDeDRM, K4DeDRM, and K4PCDeDRM and K4X plugins. If yo
|
|||||||
|
|
||||||
|
|
||||||
This plugin is meant to remove the DRM from .prc, .azw, .azw1, and .tpz ebooks. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.
|
This plugin is meant to remove the DRM from .prc, .azw, .azw1, and .tpz ebooks. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Installation:
|
Installation:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ All credit given to The Dark Reverser for the original standalone script. I had
|
|||||||
All credit given to The Dark Reverser for the original standalone script. I had the much easier job of converting it to a Calibre plugin.
|
All credit given to The Dark Reverser for the original standalone script. I had the much easier job of converting it to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files. Calibre can then convert it to whatever format you desire. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. I've included the psyco libraries (compiled for each platform) for speed. If your system can use them, great! Otherwise, they won't be used and things will just work slower.
|
This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files. Calibre can then convert it to whatever format you desire. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. I've included the psyco libraries (compiled for each platform) for speed. If your system can use them, great! Otherwise, they won't be used and things will just work slower.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ with Adobe's Adept encryption. It is meant to function without having to install
|
|||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
This plugin is meant to decrypt Barnes & Noble Epubs that are protected
|
This plugin is meant to decrypt Barnes & Noble Epubs that are protected
|
||||||
|
|
||||||
with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected w
|
|||||||
|
|
||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
@@ -47,7 +47,7 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
Credit given to The Dark Reverser for the original standalone script.'
|
Credit given to The Dark Reverser for the original standalone script.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
||||||
author = 'DiapDealer' # The author of this plugin
|
author = 'DiapDealer' # The author of this plugin
|
||||||
version = (0, 0, 5) # The version number of this plugin
|
version = (0, 0, 6) # The version number of this plugin
|
||||||
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
||||||
on_import = True # Run this plugin during the import
|
on_import = True # Run this plugin during the import
|
||||||
minimum_calibre_version = (0, 7, 55)
|
minimum_calibre_version = (0, 7, 55)
|
||||||
@@ -114,7 +114,7 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
|
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = erdr2pml.EreaderProcessor(sect.loadSection, name, cc)
|
er = erdr2pml.EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
|
|||||||
@@ -59,8 +59,11 @@
|
|||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
# 0.19 - Modify the interface to allow use of import
|
# 0.19 - Modify the interface to allow use of import
|
||||||
# 0.20 - modify to allow use inside new interface for calibre plugins
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.20'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -140,11 +143,17 @@ logging.basicConfig()
|
|||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
raise ValueError('Invalid file format')
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
@@ -182,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -219,9 +228,15 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
|
if (sect.bkType == "Book"):
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
@@ -239,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -267,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
|
if drm_sub_version == 13:
|
||||||
encrypted_key = r[44:44+8]
|
encrypted_key = r[44:44+8]
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -356,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -368,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -389,7 +412,7 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
|
|||||||
Binary file not shown.
@@ -1,11 +1,13 @@
|
|||||||
# standlone set of Mac OSX specific routines needed for K4DeDRM
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
@@ -23,6 +25,25 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('libcrypto not found')
|
raise DrmException('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
# From OpenSSL's crypto aes header
|
||||||
|
#
|
||||||
|
# AES_ENCRYPT 1
|
||||||
|
# AES_DECRYPT 0
|
||||||
|
# AES_MAXNR 14 (in bytes)
|
||||||
|
# AES_BLOCK_SIZE 16 (in bytes)
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
# note: the ivec string, and output buffer are both mutable
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
c_char_pp = POINTER(c_char_p)
|
c_char_pp = POINTER(c_char_p)
|
||||||
c_int_p = POINTER(c_int)
|
c_int_p = POINTER(c_int)
|
||||||
@@ -41,6 +62,12 @@ def _load_crypto_libcrypto():
|
|||||||
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
# From OpenSSL's Crypto evp/p5_crpt2.c
|
||||||
|
#
|
||||||
|
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
|
||||||
|
# const unsigned char *salt, int saltlen, int iter,
|
||||||
|
# int keylen, unsigned char *out);
|
||||||
|
|
||||||
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
|
||||||
@@ -48,7 +75,7 @@ def _load_crypto_libcrypto():
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._blocksize = 0
|
self._blocksize = 0
|
||||||
self._keyctx = None
|
self._keyctx = None
|
||||||
self.iv = 0
|
self._iv = 0
|
||||||
|
|
||||||
def set_decrypt_key(self, userkey, iv):
|
def set_decrypt_key(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
@@ -56,23 +83,24 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('AES improper key used')
|
raise DrmException('AES improper key used')
|
||||||
return
|
return
|
||||||
keyctx = self._keyctx = AES_KEY()
|
keyctx = self._keyctx = AES_KEY()
|
||||||
self.iv = iv
|
self._iv = iv
|
||||||
|
self._userkey = userkey
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise DrmException('Failed to initialize AES key')
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
keyctx = self._keyctx
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise DrmException('AES decryption failed')
|
raise DrmException('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
def keyivgen(self, passwd, salt):
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
saltlen = len(salt)
|
saltlen = len(salt)
|
||||||
passlen = len(passwd)
|
passlen = len(passwd)
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
out = create_string_buffer(keylen)
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
return out.raw
|
return out.raw
|
||||||
@@ -114,8 +142,13 @@ def SHA256(message):
|
|||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
def encode(data, map):
|
def encode(data, map):
|
||||||
@@ -144,7 +177,7 @@ def decode(data,map):
|
|||||||
result += pack("B",value)
|
result += pack("B",value)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For K4M 1.6.X and later
|
||||||
# generate table of prime number less than or equal to int n
|
# generate table of prime number less than or equal to int n
|
||||||
def primes(n):
|
def primes(n):
|
||||||
if n==2: return [2]
|
if n==2: return [2]
|
||||||
@@ -166,7 +199,6 @@ def primes(n):
|
|||||||
return [2]+[x for x in s if x]
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
||||||
# returns with the serial number of drive whose BSD Name is "disk0"
|
# returns with the serial number of drive whose BSD Name is "disk0"
|
||||||
def GetVolumeSerialNumber():
|
def GetVolumeSerialNumber():
|
||||||
@@ -196,20 +228,234 @@ def GetVolumeSerialNumber():
|
|||||||
foundIt = True
|
foundIt = True
|
||||||
break
|
break
|
||||||
if not foundIt:
|
if not foundIt:
|
||||||
sernum = '9999999999'
|
sernum = ''
|
||||||
return sernum
|
return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
dpath = home + '/Library/Application Support/Kindle'
|
||||||
|
cmdline = '/sbin/mount'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
disk = ''
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.startswith('/dev'):
|
||||||
|
(devpart, mpath) = resline.split(' on ')
|
||||||
|
dpart = devpart[5:]
|
||||||
|
pp = mpath.find('(')
|
||||||
|
if pp >= 0:
|
||||||
|
mpath = mpath[:pp-1]
|
||||||
|
if dpath.startswith(mpath):
|
||||||
|
disk = dpart
|
||||||
|
return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
|
||||||
|
def GetDiskPartitionUUID(diskpart):
|
||||||
|
uuidnum = os.getenv('MYUUIDNUMBER')
|
||||||
|
if uuidnum != None:
|
||||||
|
return uuidnum
|
||||||
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
bsdname = None
|
||||||
|
uuidnum = None
|
||||||
|
foundIt = False
|
||||||
|
nest = 0
|
||||||
|
uuidnest = -1
|
||||||
|
partnest = -2
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.find('{') >= 0:
|
||||||
|
nest += 1
|
||||||
|
if resline.find('}') >= 0:
|
||||||
|
nest -= 1
|
||||||
|
pp = resline.find('"UUID" = "')
|
||||||
|
if pp >= 0:
|
||||||
|
uuidnum = resline[pp+10:-1]
|
||||||
|
uuidnum = uuidnum.strip()
|
||||||
|
uuidnest = nest
|
||||||
|
if partnest == uuidnest and uuidnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
bb = resline.find('"BSD Name" = "')
|
||||||
|
if bb >= 0:
|
||||||
|
bsdname = resline[bb+14:-1]
|
||||||
|
bsdname = bsdname.strip()
|
||||||
|
if (bsdname == diskpart):
|
||||||
|
partnest = nest
|
||||||
|
else :
|
||||||
|
partnest = -2
|
||||||
|
if partnest == uuidnest and partnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if nest == 0:
|
||||||
|
partnest = -2
|
||||||
|
uuidnest = -1
|
||||||
|
uuidnum = None
|
||||||
|
bsdname = None
|
||||||
|
if not foundIt:
|
||||||
|
uuidnum = ''
|
||||||
|
return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
|
||||||
|
macnum = os.getenv('MYMACNUM')
|
||||||
|
if macnum != None:
|
||||||
|
return macnum
|
||||||
|
cmdline = '/sbin/ifconfig en0'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
macnum = None
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
pp = resline.find('ether ')
|
||||||
|
if pp >= 0:
|
||||||
|
macnum = resline[pp+6:-1]
|
||||||
|
macnum = macnum.strip()
|
||||||
|
# print "original mac", macnum
|
||||||
|
# now munge it up the way Kindle app does
|
||||||
|
# by xoring it with 0xa5 and swapping elements 3 and 4
|
||||||
|
maclst = macnum.split(':')
|
||||||
|
n = len(maclst)
|
||||||
|
if n != 6:
|
||||||
|
fountIt = False
|
||||||
|
break
|
||||||
|
for i in range(6):
|
||||||
|
maclst[i] = int('0x' + maclst[i], 0)
|
||||||
|
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
|
||||||
|
mlst[5] = maclst[5] ^ 0xa5
|
||||||
|
mlst[4] = maclst[3] ^ 0xa5
|
||||||
|
mlst[3] = maclst[4] ^ 0xa5
|
||||||
|
mlst[2] = maclst[2] ^ 0xa5
|
||||||
|
mlst[1] = maclst[1] ^ 0xa5
|
||||||
|
mlst[0] = maclst[0] ^ 0xa5
|
||||||
|
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if not foundIt:
|
||||||
|
macnum = ''
|
||||||
|
return macnum
|
||||||
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
# uses unix env to get username instead of using sysctlbyname
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
username = os.getenv('USER')
|
username = os.getenv('USER')
|
||||||
return username
|
return username
|
||||||
|
|
||||||
|
def isNewInstall():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
# soccer game fan anyone
|
||||||
|
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
|
||||||
|
# print dpath, os.path.exists(dpath)
|
||||||
|
if os.path.exists(dpath):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
# K4Mac now has an extensive set of ids strings it uses
|
||||||
|
# in encoding pids and in creating unique passwords
|
||||||
|
# for use in its own version of CryptUnprotectDataV2
|
||||||
|
|
||||||
|
# BUT Amazon has now become nasty enough to detect when its app
|
||||||
|
# is being run under a debugger and actually changes code paths
|
||||||
|
# including which one of these strings is chosen, all to try
|
||||||
|
# to prevent reverse engineering
|
||||||
|
|
||||||
|
# Sad really ... they will only hurt their own sales ...
|
||||||
|
# true book lovers really want to keep their books forever
|
||||||
|
# and move them to their devices and DRM prevents that so they
|
||||||
|
# will just buy from someplace else that they can remove
|
||||||
|
# the DRM from
|
||||||
|
|
||||||
|
# Amazon should know by now that true book lover's are not like
|
||||||
|
# penniless kids that pirate music, we do not pirate books
|
||||||
|
|
||||||
|
if isNewInstall():
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if len(sernum) > 7:
|
||||||
|
return sernum
|
||||||
|
diskpart = GetUserHomeAppSupKindleDirParitionName()
|
||||||
|
uuidnum = GetDiskPartitionUUID(diskpart)
|
||||||
|
if len(uuidnum) > 7:
|
||||||
|
return uuidnum
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
def CryptUnprotectData(encryptedData, salt):
|
# used by Kindle for Mac versions < 1.6.0
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
class CryptUnprotectData(object):
|
||||||
|
def __init__(self):
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if sernum == '':
|
||||||
|
sernum = '9999999999'
|
||||||
|
sp = sernum + '!@#' + GetUserName()
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
passwdData = encode(SHA256(sp),charMap1)
|
||||||
|
salt = '16743'
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x3e8
|
||||||
|
keylen = 0x80
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext,charMap1)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.6.0
|
||||||
|
class CryptUnprotectDataV2(object):
|
||||||
|
def __init__(self):
|
||||||
|
sp = GetUserName() + ':&%:' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap5)
|
||||||
|
# salt generation as per the code
|
||||||
|
salt = 0x0512981d * 2 * 1 * 1
|
||||||
|
salt = str(salt) + GetUserName()
|
||||||
|
salt = encode(salt,charMap5)
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap5)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
|
||||||
|
# used in Kindle for Mac Version >= 1.9.0
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
crp = LibCrypto()
|
crp = LibCrypto()
|
||||||
key_iv = crp.keyivgen(passwdData, salt)
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
key = key_iv[0:32]
|
key = key_iv[0:32]
|
||||||
iv = key_iv[32:48]
|
iv = key_iv[32:48]
|
||||||
crp.set_decrypt_key(key,iv)
|
crp.set_decrypt_key(key,iv)
|
||||||
@@ -217,6 +463,27 @@ def CryptUnprotectData(encryptedData, salt):
|
|||||||
return cleartext
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.9.0
|
||||||
|
class CryptUnprotectDataV3(object):
|
||||||
|
def __init__(self, entropy):
|
||||||
|
sp = GetUserName() + '+@#$%+' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap2)
|
||||||
|
salt = entropy
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap2)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# Locate the .kindle-info files
|
# Locate the .kindle-info files
|
||||||
def getKindleInfoFiles(kInfoFiles):
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
# first search for current .kindle-info files
|
# first search for current .kindle-info files
|
||||||
@@ -232,18 +499,26 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
if os.path.isfile(resline):
|
if os.path.isfile(resline):
|
||||||
kInfoFiles.append(resline)
|
kInfoFiles.append(resline)
|
||||||
found = True
|
found = True
|
||||||
# For Future Reference
|
# add any .rainier*-kinf files
|
||||||
#
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
|
||||||
# # add any .kinf files
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
# cmdline = 'find "' + home + '/Library/Application Support" -name "rainier*.kinf"'
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
# cmdline = cmdline.encode(sys.getfilesystemencoding())
|
out1, out2 = p1.communicate()
|
||||||
# p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
reslst = out1.split('\n')
|
||||||
# out1, out2 = p1.communicate()
|
for resline in reslst:
|
||||||
# reslst = out1.split('\n')
|
if os.path.isfile(resline):
|
||||||
# for resline in reslst:
|
kInfoFiles.append(resline)
|
||||||
# if os.path.isfile(resline):
|
found = True
|
||||||
# kInfoFiles.append(resline)
|
# add any .kinf2011 files
|
||||||
# found = True
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
if not found:
|
if not found:
|
||||||
print('No kindle-info files have been found.')
|
print('No kindle-info files have been found.')
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
@@ -251,7 +526,7 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -261,6 +536,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
if data.find('[') != -1 :
|
if data.find('[') != -1 :
|
||||||
|
|
||||||
# older style kindle-info file
|
# older style kindle-info file
|
||||||
|
cud = CryptUnprotectData()
|
||||||
items = data.split('[')
|
items = data.split('[')
|
||||||
for item in items:
|
for item in items:
|
||||||
if item != '':
|
if item != '':
|
||||||
@@ -273,84 +549,177 @@ def getDBfromFile(kInfoFile):
|
|||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
encryptedValue = decode(rawdata,charMap2)
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
salt = '16743'
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
cleartext = CryptUnprotectData(encryptedValue, salt)
|
DB[keyname] = cleartext
|
||||||
DB[keyname] = decode(cleartext,charMap1)
|
|
||||||
cnt = cnt + 1
|
cnt = cnt + 1
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# For Future Reference taken from K4PC 1.5.0 .kinf
|
if hdr == '/':
|
||||||
#
|
|
||||||
# # else newer style .kinf file
|
# else newer style .kinf file used by K4Mac >= 1.6.0
|
||||||
# # the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# # so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
# data = data[:-1]
|
data = data[:-1]
|
||||||
# items = data.split('/')
|
items = data.split('/')
|
||||||
#
|
cud = CryptUnprotectDataV2()
|
||||||
# # loop through the item records until all are processed
|
|
||||||
# while len(items) > 0:
|
# loop through the item records until all are processed
|
||||||
#
|
while len(items) > 0:
|
||||||
# # get the first item record
|
|
||||||
# item = items.pop(0)
|
# get the first item record
|
||||||
#
|
item = items.pop(0)
|
||||||
# # the first 32 chars of the first record of a group
|
|
||||||
# # is the MD5 hash of the key name encoded by charMap5
|
# the first 32 chars of the first record of a group
|
||||||
# keyhash = item[0:32]
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
#
|
keyhash = item[0:32]
|
||||||
# # the raw keyhash string is also used to create entropy for the actual
|
keyname = "unknown"
|
||||||
# # CryptProtectData Blob that represents that keys contents
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
# "entropy" not used for K4Mac only K4PC
|
||||||
# entropy = SHA1(keyhash)
|
# entropy = SHA1(keyhash)
|
||||||
#
|
|
||||||
# # the remainder of the first record when decoded with charMap5
|
# the remainder of the first record when decoded with charMap5
|
||||||
# # has the ':' split char followed by the string representation
|
# has the ':' split char followed by the string representation
|
||||||
# # of the number of records that follow
|
# of the number of records that follow
|
||||||
# # and make up the contents
|
# and make up the contents
|
||||||
# srcnt = decode(item[34:],charMap5)
|
srcnt = decode(item[34:],charMap5)
|
||||||
# rcnt = int(srcnt)
|
rcnt = int(srcnt)
|
||||||
#
|
|
||||||
# # read and store in rcnt records of data
|
# read and store in rcnt records of data
|
||||||
# # that make up the contents value
|
# that make up the contents value
|
||||||
# edlst = []
|
edlst = []
|
||||||
# for i in xrange(rcnt):
|
for i in xrange(rcnt):
|
||||||
# item = items.pop(0)
|
item = items.pop(0)
|
||||||
# edlst.append(item)
|
edlst.append(item)
|
||||||
#
|
|
||||||
# keyname = "unknown"
|
keyname = "unknown"
|
||||||
# for name in names:
|
for name in names:
|
||||||
# if encodeHash(name,charMap5) == keyhash:
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
# keyname = name
|
keyname = name
|
||||||
# break
|
break
|
||||||
# if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
# keyname = keyhash
|
keyname = keyhash
|
||||||
#
|
|
||||||
# # the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# # of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# # to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
# # working properly, and thereby preventing the ensuing
|
# working properly, and thereby preventing the ensuing
|
||||||
# # CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
#
|
|
||||||
# # The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# # len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
# # (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
#
|
|
||||||
# # move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
# encdata = "".join(edlst)
|
encdata = "".join(edlst)
|
||||||
# contlen = len(encdata)
|
contlen = len(encdata)
|
||||||
# noffset = contlen - primes(int(contlen/3))[-1]
|
|
||||||
#
|
# now properly split and recombine
|
||||||
# # now properly split and recombine
|
# by moving noffset chars from the start of the
|
||||||
# # by moving noffset chars from the start of the
|
# string to the end of the string
|
||||||
# # string to the end of the string
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
# pfx = encdata[0:noffset]
|
pfx = encdata[0:noffset]
|
||||||
# encdata = encdata[noffset:]
|
encdata = encdata[noffset:]
|
||||||
# encdata = encdata + pfx
|
encdata = encdata + pfx
|
||||||
#
|
|
||||||
# # decode using Map5 to get the CryptProtect Data
|
# decode using charMap5 to get the CryptProtect Data
|
||||||
# encryptedValue = decode(encdata,charMap5)
|
encryptedValue = decode(encdata,charMap5)
|
||||||
# DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
# cnt = cnt + 1
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# the latest .kinf2011 version for K4M 1.9.1
|
||||||
|
# put back the hdr char, it is needed
|
||||||
|
data = hdr + data
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# the headerblob is the encrypted information needed to build the entropy string
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, charMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
|
||||||
|
# now extract the pieces in the same way
|
||||||
|
# this version is different from K4PC it scales the build number by multipying by 735
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
|
||||||
|
|
||||||
|
cud = CryptUnprotectDataV3(entropy)
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# unlike K4PC the keyhash is not used in generating entropy
|
||||||
|
# entropy = SHA1(keyhash) + added_entropy
|
||||||
|
# entropy = added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using testMap8 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
# print keyname
|
||||||
|
# print cleartext
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys, os
|
import sys, os, re
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
@@ -11,9 +11,7 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
|||||||
string_at, Structure, c_void_p, cast
|
string_at, Structure, c_void_p, cast
|
||||||
|
|
||||||
import _winreg as winreg
|
import _winreg as winreg
|
||||||
|
|
||||||
MAX_PATH = 255
|
MAX_PATH = 255
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
kernel32 = windll.kernel32
|
||||||
advapi32 = windll.advapi32
|
advapi32 = windll.advapi32
|
||||||
crypt32 = windll.crypt32
|
crypt32 = windll.crypt32
|
||||||
@@ -33,6 +31,32 @@ def SHA1(message):
|
|||||||
ctx.update(message)
|
ctx.update(message)
|
||||||
return ctx.digest()
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# For K4PC 1.9.X
|
||||||
|
# use routines in alfcrypto:
|
||||||
|
# AES_cbc_encrypt
|
||||||
|
# AES_set_decrypt_key
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1
|
||||||
|
|
||||||
|
from alfcrypto import AES_CBC, KeyIVGen
|
||||||
|
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
|
key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
|
||||||
|
key = key_iv[0:32]
|
||||||
|
iv = key_iv[32:48]
|
||||||
|
aes=AES_CBC()
|
||||||
|
aes.set_decrypt_key(key, iv)
|
||||||
|
cleartext = aes.decrypt(encryptedData)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# simple primes table (<= n) calculator
|
# simple primes table (<= n) calculator
|
||||||
def primes(n):
|
def primes(n):
|
||||||
@@ -59,6 +83,10 @@ def primes(n):
|
|||||||
# Probably supposed to act as obfuscation
|
# Probably supposed to act as obfuscation
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# New maps in K4PC 1.9.0
|
||||||
|
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -122,6 +150,9 @@ def GetVolumeSerialNumber():
|
|||||||
return GetVolumeSerialNumber
|
return GetVolumeSerialNumber
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
return GetVolumeSerialNumber()
|
||||||
|
|
||||||
def getLastError():
|
def getLastError():
|
||||||
GetLastError = kernel32.GetLastError
|
GetLastError = kernel32.GetLastError
|
||||||
GetLastError.argtypes = None
|
GetLastError.argtypes = None
|
||||||
@@ -162,7 +193,8 @@ def CryptUnprotectData():
|
|||||||
outdata = DataBlob()
|
outdata = DataBlob()
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
None, None, flags, byref(outdata)):
|
None, None, flags, byref(outdata)):
|
||||||
raise DrmException("Failed to Unprotect Data")
|
# raise DrmException("Failed to Unprotect Data")
|
||||||
|
return 'failed'
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
return CryptUnprotectData
|
return CryptUnprotectData
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
@@ -173,6 +205,13 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
|
|
||||||
|
# some 64 bit machines do not have the proper registry key for some reason
|
||||||
|
# or the pythonn interface to the 32 vs 64 bit registry is broken
|
||||||
|
if 'LOCALAPPDATA' in os.environ.keys():
|
||||||
|
path = os.environ['LOCALAPPDATA']
|
||||||
|
|
||||||
|
print "searching for kinfoFiles in ", path
|
||||||
|
|
||||||
# first look for older kindle-info files
|
# first look for older kindle-info files
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
@@ -181,18 +220,34 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
||||||
|
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
print('No .kinf files have not been found.')
|
print('No K4PC 1.5.X .kinf files have not been found.')
|
||||||
else:
|
else:
|
||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.6.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.9.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -220,7 +275,8 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# else newer style .kinf file
|
if hdr == '/':
|
||||||
|
# else rainier-2-1-1 .kinf file
|
||||||
# the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
data = data[:-1]
|
data = data[:-1]
|
||||||
@@ -236,7 +292,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# is the MD5 hash of the key name encoded by charMap5
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
keyhash = item[0:32]
|
keyhash = item[0:32]
|
||||||
|
|
||||||
# the raw keyhash string is also used to create entropy for the actual
|
# the raw keyhash string is used to create entropy for the actual
|
||||||
# CryptProtectData Blob that represents that keys contents
|
# CryptProtectData Blob that represents that keys contents
|
||||||
entropy = SHA1(keyhash)
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
@@ -261,7 +317,6 @@ def getDBfromFile(kInfoFile):
|
|||||||
break
|
break
|
||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
|
|
||||||
# the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
@@ -269,7 +324,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
# The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
# (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
# move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
@@ -293,4 +348,85 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
|
# else newest .kinf2011 style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
# need to put back the first char read because it it part
|
||||||
|
# of the added entropy blob
|
||||||
|
data = hdr + data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# starts with and encoded and encrypted header blob
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, testMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
# now extract the pieces that form the added entropy
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
added_entropy = m.group(2) + m.group(4)
|
||||||
|
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the sha1 of raw keyhash string is used to create entropy along
|
||||||
|
# with the added entropy provided above from the headerblob
|
||||||
|
entropy = SHA1(keyhash) + added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
# key names now use the new testMap8 encoding
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using new testMap8 to get the original CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -49,11 +49,16 @@
|
|||||||
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
# included in the encryption were wrong. They aren't for DOC compressed
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
# files, but they are for HUFF/CDIC compress files!
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
# 0.33 - Performance improvements for large files (concatenation)
|
||||||
|
# 0.34 - Performance improvements in decryption (libalfcrypto)
|
||||||
|
# 0.35 - add interface to get mobi_version
|
||||||
|
|
||||||
__version__ = '0.30'
|
__version__ = '0.35'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -70,6 +75,7 @@ sys.stdout=Unbuffered(sys.stdout)
|
|||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import binascii
|
import binascii
|
||||||
|
from alfcrypto import Pukall_Cipher
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,36 +87,37 @@ class DrmException(Exception):
|
|||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
return Pukall_Cipher().PC1(key,src,decryption)
|
||||||
sum2 = 0;
|
# sum1 = 0;
|
||||||
keyXorVal = 0;
|
# sum2 = 0;
|
||||||
if len(key)!=16:
|
# keyXorVal = 0;
|
||||||
print "Bad key length!"
|
# if len(key)!=16:
|
||||||
return None
|
# print "Bad key length!"
|
||||||
wkey = []
|
# return None
|
||||||
for i in xrange(8):
|
# wkey = []
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
# for i in xrange(8):
|
||||||
dst = ""
|
# wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
for i in xrange(len(src)):
|
# dst = ""
|
||||||
temp1 = 0;
|
# for i in xrange(len(src)):
|
||||||
byteXorVal = 0;
|
# temp1 = 0;
|
||||||
for j in xrange(8):
|
# byteXorVal = 0;
|
||||||
temp1 ^= wkey[j]
|
# for j in xrange(8):
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
# temp1 ^= wkey[j]
|
||||||
sum1 = (temp1*346)&0xFFFF
|
# sum2 = (sum2+j)*20021 + sum1
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
# sum1 = (temp1*346)&0xFFFF
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
# sum2 = (sum2+sum1)&0xFFFF
|
||||||
byteXorVal ^= temp1 ^ sum2
|
# temp1 = (temp1*20021+1)&0xFFFF
|
||||||
curByte = ord(src[i])
|
# byteXorVal ^= temp1 ^ sum2
|
||||||
if not decryption:
|
# curByte = ord(src[i])
|
||||||
keyXorVal = curByte * 257;
|
# if not decryption:
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
# keyXorVal = curByte * 257;
|
||||||
if decryption:
|
# curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
keyXorVal = curByte * 257;
|
# if decryption:
|
||||||
for j in xrange(8):
|
# keyXorVal = curByte * 257;
|
||||||
wkey[j] ^= keyXorVal;
|
# for j in xrange(8):
|
||||||
dst+=chr(curByte)
|
# wkey[j] ^= keyXorVal;
|
||||||
return dst
|
# dst+=chr(curByte)
|
||||||
|
# return dst
|
||||||
|
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
@@ -162,6 +169,9 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
self.mobi_data = ''
|
self.mobi_data = ''
|
||||||
@@ -192,14 +202,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if (self.mobi_version < 7) and (self.compression != 17480):
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -229,8 +240,13 @@ class MobiBook:
|
|||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -241,7 +257,10 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = ''
|
rec209 = ''
|
||||||
@@ -306,16 +325,29 @@ class MobiBook:
|
|||||||
def getMobiFile(self, outpath):
|
def getMobiFile(self, outpath):
|
||||||
file(outpath,'wb').write(self.mobi_data)
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getMobiVersion(self):
|
||||||
|
return self.mobi_version
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
self.mobi_data = self.data_file
|
self.mobi_data = self.data_file
|
||||||
return
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -343,7 +375,7 @@ class MobiBook:
|
|||||||
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
||||||
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
||||||
if not found_key:
|
if not found_key:
|
||||||
raise DrmException("No key found. Most likely the correct PID has not been given.")
|
raise DrmException("No key found. Please report this failure for help.")
|
||||||
# kill the drm keys
|
# kill the drm keys
|
||||||
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
||||||
# kill the drm pointers
|
# kill the drm pointers
|
||||||
@@ -359,18 +391,23 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
self.mobi_data = self.data_file[:self.sections[1][0]]
|
mobidataList = []
|
||||||
|
mobidataList.append(self.data_file[:self.sections[1][0]])
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
self.mobi_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
mobidataList.append(decoded_data)
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
self.mobi_data += data[-extra_size:]
|
mobidataList.append(data[-extra_size:])
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
|
mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
|
||||||
|
self.mobi_data = "".join(mobidataList)
|
||||||
print "done"
|
print "done"
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -391,9 +428,9 @@ def getUnencryptedBookWithList(infile,pidlist):
|
|||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<3 or len(argv)>4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
|
|||||||
Binary file not shown.
@@ -24,17 +24,17 @@
|
|||||||
<key>CFBundleExecutable</key>
|
<key>CFBundleExecutable</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleGetInfoString</key>
|
<key>CFBundleGetInfoString</key>
|
||||||
<string>DeDRM 2.7, Written 2010–2011 by Apprentice Alf and others.</string>
|
<string>DeDRM 5.1, Written 2010–2012 by Apprentice Alf and others.</string>
|
||||||
<key>CFBundleIconFile</key>
|
<key>CFBundleIconFile</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleInfoDictionaryVersion</key>
|
<key>CFBundleInfoDictionaryVersion</key>
|
||||||
<string>6.0</string>
|
<string>6.0</string>
|
||||||
<key>CFBundleName</key>
|
<key>CFBundleName</key>
|
||||||
<string>DeDRM</string>
|
<string>DeDRM 5.1</string>
|
||||||
<key>CFBundlePackageType</key>
|
<key>CFBundlePackageType</key>
|
||||||
<string>APPL</string>
|
<string>APPL</string>
|
||||||
<key>CFBundleShortVersionString</key>
|
<key>CFBundleShortVersionString</key>
|
||||||
<string>2.7</string>
|
<string>5.1</string>
|
||||||
<key>CFBundleSignature</key>
|
<key>CFBundleSignature</key>
|
||||||
<string>dplt</string>
|
<string>dplt</string>
|
||||||
<key>LSMinimumSystemVersion</key>
|
<key>LSMinimumSystemVersion</key>
|
||||||
@@ -44,15 +44,15 @@
|
|||||||
<key>WindowState</key>
|
<key>WindowState</key>
|
||||||
<dict>
|
<dict>
|
||||||
<key>dividerCollapsed</key>
|
<key>dividerCollapsed</key>
|
||||||
<true/>
|
<false/>
|
||||||
<key>eventLogLevel</key>
|
<key>eventLogLevel</key>
|
||||||
<integer>-1</integer>
|
<integer>-1</integer>
|
||||||
<key>name</key>
|
<key>name</key>
|
||||||
<string>ScriptWindowState</string>
|
<string>ScriptWindowState</string>
|
||||||
<key>positionOfDivider</key>
|
<key>positionOfDivider</key>
|
||||||
<real>0</real>
|
<real>460</real>
|
||||||
<key>savedFrame</key>
|
<key>savedFrame</key>
|
||||||
<string>1578 27 862 788 1440 -150 1680 1050 </string>
|
<string>1518 90 1316 746 1440 -150 1680 1050 </string>
|
||||||
<key>selectedTabView</key>
|
<key>selectedTabView</key>
|
||||||
<string>event log</string>
|
<string>event log</string>
|
||||||
</dict>
|
</dict>
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,568 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for doing AES CBC in one file
|
||||||
|
|
||||||
|
Modified by some_updates to extract
|
||||||
|
and combine only those parts needed for AES CBC
|
||||||
|
into one simple to add python file
|
||||||
|
|
||||||
|
Original Version
|
||||||
|
Copyright (c) 2002 by Paul A. Lambert
|
||||||
|
Under:
|
||||||
|
CryptoPy Artisitic License Version 1.0
|
||||||
|
See the wonderful pure python package cryptopy-1.2.5
|
||||||
|
and read its LICENSE.txt for complete license details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CryptoError(Exception):
|
||||||
|
""" Base class for crypto exceptions """
|
||||||
|
def __init__(self,errorMessage='Error!'):
|
||||||
|
self.message = errorMessage
|
||||||
|
def __str__(self):
|
||||||
|
return self.message
|
||||||
|
|
||||||
|
class InitCryptoError(CryptoError):
|
||||||
|
""" Crypto errors during algorithm initialization """
|
||||||
|
class BadKeySizeError(InitCryptoError):
|
||||||
|
""" Bad key size error """
|
||||||
|
class EncryptError(CryptoError):
|
||||||
|
""" Error in encryption processing """
|
||||||
|
class DecryptError(CryptoError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
class DecryptNotBlockAlignedError(DecryptError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
|
||||||
|
def xorS(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
assert len(a)==len(b)
|
||||||
|
x = []
|
||||||
|
for i in range(len(a)):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
def xor(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
x = []
|
||||||
|
for i in range(min(len(a),len(b))):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Base 'BlockCipher' and Pad classes for cipher instances.
|
||||||
|
BlockCipher supports automatic padding and type conversion. The BlockCipher
|
||||||
|
class was written to make the actual algorithm code more readable and
|
||||||
|
not for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class BlockCipher:
|
||||||
|
""" Block ciphers """
|
||||||
|
def __init__(self):
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.resetEncrypt()
|
||||||
|
self.resetDecrypt()
|
||||||
|
def resetEncrypt(self):
|
||||||
|
self.encryptBlockCount = 0
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
def resetDecrypt(self):
|
||||||
|
self.decryptBlockCount = 0
|
||||||
|
self.bytesToDecrypt = ''
|
||||||
|
|
||||||
|
def encrypt(self, plainText, more = None):
|
||||||
|
""" Encrypt a string and return a binary string """
|
||||||
|
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
|
||||||
|
cipherText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # no more data expected from caller
|
||||||
|
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
|
||||||
|
if len(finalBytes) > 0:
|
||||||
|
ctBlock = self.encryptBlock(finalBytes)
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
self.resetEncrypt()
|
||||||
|
return cipherText
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, more = None):
|
||||||
|
""" Decrypt a string and return a string """
|
||||||
|
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
|
||||||
|
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
|
||||||
|
if more == None: # no more calls to decrypt, should have all the data
|
||||||
|
if numExtraBytes != 0:
|
||||||
|
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
|
||||||
|
|
||||||
|
# hold back some bytes in case last decrypt has zero len
|
||||||
|
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
|
||||||
|
numBlocks -= 1
|
||||||
|
numExtraBytes = self.blockSize
|
||||||
|
|
||||||
|
plainText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
|
||||||
|
self.decryptBlockCount += 1
|
||||||
|
plainText += ptBlock
|
||||||
|
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # last decrypt remove padding
|
||||||
|
plainText = self.padding.removePad(plainText, self.blockSize)
|
||||||
|
self.resetDecrypt()
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
|
||||||
|
class Pad:
|
||||||
|
def __init__(self):
|
||||||
|
pass # eventually could put in calculation of min and max size extension
|
||||||
|
|
||||||
|
class padWithPadLen(Pad):
|
||||||
|
""" Pad a binary string with the length of the padding """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add padding to a binary string to make it an even multiple
|
||||||
|
of the block size """
|
||||||
|
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
|
||||||
|
padLength = blockSize - numExtraBytes
|
||||||
|
return extraBytes + padLength*chr(padLength)
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove padding from a binary string """
|
||||||
|
if not(0<len(paddedBinaryString)):
|
||||||
|
raise DecryptNotBlockAlignedError, 'Expected More Data'
|
||||||
|
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
|
||||||
|
|
||||||
|
class noPadding(Pad):
|
||||||
|
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add no padding """
|
||||||
|
return extraBytes
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove no padding """
|
||||||
|
return paddedBinaryString
|
||||||
|
|
||||||
|
"""
|
||||||
|
Rijndael encryption algorithm
|
||||||
|
This byte oriented implementation is intended to closely
|
||||||
|
match FIPS specification for readability. It is not implemented
|
||||||
|
for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Rijndael(BlockCipher):
|
||||||
|
""" Rijndael encryption algorithm """
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
|
||||||
|
self.name = 'RIJNDAEL'
|
||||||
|
self.keySize = keySize
|
||||||
|
self.strength = keySize*8
|
||||||
|
self.blockSize = blockSize # blockSize is in bytes
|
||||||
|
self.padding = padding # change default to noPadding() to get normal ECB behavior
|
||||||
|
|
||||||
|
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes'
|
||||||
|
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes'
|
||||||
|
|
||||||
|
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
|
||||||
|
self.Nk = keySize/4 # Nk is the key length in 32-bit words
|
||||||
|
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
|
||||||
|
# the block (Nb) and key (Nk) sizes.
|
||||||
|
if key != None:
|
||||||
|
self.setKey(key)
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
""" Set a key and generate the expanded key """
|
||||||
|
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
|
||||||
|
self.__expandedKey = keyExpansion(self, key)
|
||||||
|
self.reset() # BlockCipher.reset()
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
|
||||||
|
self.state = self._toBlock(plainTextBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
MixColumns(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" decrypt a block (array of bytes) """
|
||||||
|
self.state = self._toBlock(encryptedBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
for round in range(self.Nr-1,0,-1):
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
InvMixColumns(self)
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
def _toBlock(self, bs):
|
||||||
|
""" Convert binary string to array of bytes, state[col][row]"""
|
||||||
|
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
|
||||||
|
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
|
||||||
|
|
||||||
|
def _toBString(self, block):
|
||||||
|
""" Convert block (array of bytes) to binary string """
|
||||||
|
l = []
|
||||||
|
for col in block:
|
||||||
|
for rowElement in col:
|
||||||
|
l.append(chr(rowElement))
|
||||||
|
return ''.join(l)
|
||||||
|
#-------------------------------------
|
||||||
|
""" Number of rounds Nr = NrTable[Nb][Nk]
|
||||||
|
|
||||||
|
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
|
||||||
|
------------------------------------- """
|
||||||
|
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
5: {4:11, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
6: {4:12, 5:12, 6:12, 7:13, 8:14},
|
||||||
|
7: {4:13, 5:13, 6:13, 7:13, 8:14},
|
||||||
|
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
|
||||||
|
#-------------------------------------
|
||||||
|
def keyExpansion(algInstance, keyString):
|
||||||
|
""" Expand a string of size keySize into a larger array """
|
||||||
|
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
|
||||||
|
key = [ord(byte) for byte in keyString] # convert string to list
|
||||||
|
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
|
||||||
|
for i in range(Nk,Nb*(Nr+1)):
|
||||||
|
temp = w[i-1] # a four byte column
|
||||||
|
if (i%Nk) == 0 :
|
||||||
|
temp = temp[1:]+[temp[0]] # RotWord(temp)
|
||||||
|
temp = [ Sbox[byte] for byte in temp ]
|
||||||
|
temp[0] ^= Rcon[i/Nk]
|
||||||
|
elif Nk > 6 and i%Nk == 4 :
|
||||||
|
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
|
||||||
|
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
|
||||||
|
return w
|
||||||
|
|
||||||
|
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
|
||||||
|
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
|
||||||
|
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def AddRoundKey(algInstance, keyBlock):
|
||||||
|
""" XOR the algorithm state with a block of key material """
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] ^= keyBlock[column][row]
|
||||||
|
#-------------------------------------
|
||||||
|
|
||||||
|
def SubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
def InvSubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
|
||||||
|
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
|
||||||
|
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
|
||||||
|
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
|
||||||
|
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
|
||||||
|
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
|
||||||
|
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
|
||||||
|
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
|
||||||
|
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
|
||||||
|
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
|
||||||
|
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
|
||||||
|
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
|
||||||
|
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
|
||||||
|
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
|
||||||
|
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
|
||||||
|
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
|
||||||
|
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
|
||||||
|
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
|
||||||
|
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
|
||||||
|
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
|
||||||
|
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
|
||||||
|
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
|
||||||
|
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
|
||||||
|
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
|
||||||
|
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
|
||||||
|
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
|
||||||
|
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
|
||||||
|
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
|
||||||
|
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
|
||||||
|
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
|
||||||
|
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
|
||||||
|
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
|
||||||
|
|
||||||
|
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
|
||||||
|
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
|
||||||
|
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
|
||||||
|
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
|
||||||
|
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
|
||||||
|
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
|
||||||
|
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
|
||||||
|
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
|
||||||
|
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
|
||||||
|
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
|
||||||
|
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
|
||||||
|
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
|
||||||
|
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
|
||||||
|
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
|
||||||
|
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
|
||||||
|
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
|
||||||
|
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
|
||||||
|
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
|
||||||
|
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
|
||||||
|
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
|
||||||
|
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
|
||||||
|
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
|
||||||
|
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
|
||||||
|
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
|
||||||
|
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
|
||||||
|
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
|
||||||
|
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
|
||||||
|
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
|
||||||
|
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
|
||||||
|
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
|
||||||
|
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
|
||||||
|
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
""" For each block size (Nb), the ShiftRow operation shifts row i
|
||||||
|
by the amount Ci. Note that row 0 is not shifted.
|
||||||
|
Nb C1 C2 C3
|
||||||
|
------------------- """
|
||||||
|
shiftOffset = { 4 : ( 0, 1, 2, 3),
|
||||||
|
5 : ( 0, 1, 2, 3),
|
||||||
|
6 : ( 0, 1, 2, 3),
|
||||||
|
7 : ( 0, 1, 2, 4),
|
||||||
|
8 : ( 0, 1, 3, 4) }
|
||||||
|
def ShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
def InvShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
#-------------------------------------
|
||||||
|
def MixColumns(a):
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
|
||||||
|
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
def InvMixColumns(a):
|
||||||
|
""" Mix the four bytes of every column in a linear way
|
||||||
|
This is the opposite operation of Mixcolumn """
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
|
||||||
|
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
|
||||||
|
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
|
||||||
|
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def mul(a, b):
|
||||||
|
""" Multiply two elements of GF(2^m)
|
||||||
|
needed for MixColumn and InvMixColumn """
|
||||||
|
if (a !=0 and b!=0):
|
||||||
|
return Alogtable[(Logtable[a] + Logtable[b])%255]
|
||||||
|
else:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
|
||||||
|
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
|
||||||
|
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
|
||||||
|
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
|
||||||
|
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
|
||||||
|
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
|
||||||
|
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
|
||||||
|
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
|
||||||
|
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
|
||||||
|
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
|
||||||
|
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
|
||||||
|
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
|
||||||
|
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
|
||||||
|
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
|
||||||
|
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
|
||||||
|
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
|
||||||
|
|
||||||
|
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
|
||||||
|
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
|
||||||
|
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
|
||||||
|
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
|
||||||
|
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
|
||||||
|
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
|
||||||
|
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
|
||||||
|
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
|
||||||
|
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
|
||||||
|
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
|
||||||
|
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
|
||||||
|
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
|
||||||
|
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
|
||||||
|
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
|
||||||
|
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
|
||||||
|
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES Encryption Algorithm
|
||||||
|
The AES algorithm is just Rijndael algorithm restricted to the default
|
||||||
|
blockSize of 128 bits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES(Rijndael):
|
||||||
|
""" The AES algorithm is the Rijndael block cipher restricted to block
|
||||||
|
sizes of 128 bits and key sizes of 128, 192 or 256 bits
|
||||||
|
"""
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
|
||||||
|
""" Initialize AES, keySize is in bytes """
|
||||||
|
if not (keySize == 16 or keySize == 24 or keySize == 32) :
|
||||||
|
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
|
||||||
|
|
||||||
|
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
|
||||||
|
|
||||||
|
self.name = 'AES'
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CBC mode of encryption for block ciphers.
|
||||||
|
This algorithm mode wraps any BlockCipher to make a
|
||||||
|
Cipher Block Chaining mode.
|
||||||
|
"""
|
||||||
|
from random import Random # should change to crypto.random!!!
|
||||||
|
|
||||||
|
|
||||||
|
class CBC(BlockCipher):
|
||||||
|
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
|
||||||
|
algorithms. The initialization (IV) is automatic if set to None. Padding
|
||||||
|
is also automatic based on the Pad class used to initialize the algorithm
|
||||||
|
"""
|
||||||
|
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
|
||||||
|
""" CBC algorithms are created by initializing with a BlockCipher instance """
|
||||||
|
self.baseCipher = blockCipherInstance
|
||||||
|
self.name = self.baseCipher.name + '_CBC'
|
||||||
|
self.blockSize = self.baseCipher.blockSize
|
||||||
|
self.keySize = self.baseCipher.keySize
|
||||||
|
self.padding = padding
|
||||||
|
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
|
||||||
|
self.r = Random() # for IV generation, currently uses
|
||||||
|
# mediocre standard distro version <----------------
|
||||||
|
import time
|
||||||
|
newSeed = time.ctime()+str(self.r) # seed with instance location
|
||||||
|
self.r.seed(newSeed) # to make unique
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
self.baseCipher.setKey(key)
|
||||||
|
|
||||||
|
# Overload to reset both CBC state and the wrapped baseCipher
|
||||||
|
def resetEncrypt(self):
|
||||||
|
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
|
||||||
|
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
|
||||||
|
|
||||||
|
def resetDecrypt(self):
|
||||||
|
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
|
||||||
|
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
|
||||||
|
|
||||||
|
def encrypt(self, plainText, iv=None, more=None):
|
||||||
|
""" CBC encryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to encrypt'
|
||||||
|
|
||||||
|
return BlockCipher.encrypt(self,plainText, more=more)
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, iv=None, more=None):
|
||||||
|
""" CBC decryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.decryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to decrypt'
|
||||||
|
|
||||||
|
return BlockCipher.decrypt(self, cipherText, more=more)
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" CBC block encryption, IV is set with 'encrypt' """
|
||||||
|
auto_IV = ''
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
if self.iv == None:
|
||||||
|
# generate IV and use
|
||||||
|
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
|
||||||
|
else: # application provided IV
|
||||||
|
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
""" encrypt the prior CT XORed with the PT """
|
||||||
|
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
|
||||||
|
self.prior_encr_CT_block = ct
|
||||||
|
return auto_IV+ct
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" Decrypt a single block """
|
||||||
|
|
||||||
|
if self.decryptBlockCount == 0: # first call, process IV
|
||||||
|
if self.iv == None: # auto decrypt IV?
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
return ''
|
||||||
|
else:
|
||||||
|
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
|
||||||
|
self.prior_CT_block = self.iv
|
||||||
|
|
||||||
|
dct = self.baseCipher.decryptBlock(encryptedBlock)
|
||||||
|
""" XOR the prior decrypted CT with the prior CT """
|
||||||
|
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
|
||||||
|
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
|
||||||
|
return dct_XOR_priorCT
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES_CBC Encryption Algorithm
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES_CBC(CBC):
|
||||||
|
""" AES encryption in CBC feedback mode """
|
||||||
|
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
|
||||||
|
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
|
||||||
|
self.name = 'AES_CBC'
|
||||||
@@ -0,0 +1,290 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines libalfcrypto
|
||||||
|
def _load_libalfcrypto():
|
||||||
|
import ctypes
|
||||||
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
|
||||||
|
|
||||||
|
pointer_size = ctypes.sizeof(ctypes.c_voidp)
|
||||||
|
name_of_lib = None
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
name_of_lib = 'libalfcrypto.dylib'
|
||||||
|
elif sys.platform.startswith('win'):
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'alfcrypto.dll'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'alfcrypto64.dll'
|
||||||
|
else:
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'libalfcrypto32.so'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'libalfcrypto64.so'
|
||||||
|
|
||||||
|
libalfcrypto = sys.path[0] + os.sep + name_of_lib
|
||||||
|
|
||||||
|
if not os.path.isfile(libalfcrypto):
|
||||||
|
raise Exception('libalfcrypto not found')
|
||||||
|
|
||||||
|
libalfcrypto = CDLL(libalfcrypto)
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libalfcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
# aes cbc decryption
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
#
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key,
|
||||||
|
# unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pukall 1 Cipher
|
||||||
|
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
|
||||||
|
# unsigned char *dest, unsigned int len, int decryption);
|
||||||
|
|
||||||
|
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
|
||||||
|
|
||||||
|
# Topaz Encryption
|
||||||
|
# typedef struct _TpzCtx {
|
||||||
|
# unsigned int v[2];
|
||||||
|
# } TpzCtx;
|
||||||
|
#
|
||||||
|
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
|
||||||
|
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
|
||||||
|
|
||||||
|
class TPZ_CTX(Structure):
|
||||||
|
_fields_ = [('v', c_long * 2)]
|
||||||
|
|
||||||
|
TPZ_CTX_p = POINTER(TPZ_CTX)
|
||||||
|
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
|
||||||
|
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
|
||||||
|
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise Exception('AES CBC improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise Exception('Failed to initialize AES CBC key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise Exception('AES CBC decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
self.key = key
|
||||||
|
out = create_string_buffer(len(src))
|
||||||
|
de = 0
|
||||||
|
if decryption:
|
||||||
|
de = 1
|
||||||
|
rv = PC1(key, len(key), src, out, len(src), de)
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
tpz_ctx = self._ctx = TPZ_CTX()
|
||||||
|
topazCryptoInit(tpz_ctx, key, len(key))
|
||||||
|
return tpz_ctx
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
topazCryptoDecrypt(ctx, data, out, len(data))
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
print "Using Library AlfCrypto DLL/DYLIB/SO"
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
|
||||||
|
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
sum1 = 0;
|
||||||
|
sum2 = 0;
|
||||||
|
keyXorVal = 0;
|
||||||
|
if len(key)!=16:
|
||||||
|
print "Bad key length!"
|
||||||
|
return None
|
||||||
|
wkey = []
|
||||||
|
for i in xrange(8):
|
||||||
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
dst = ""
|
||||||
|
for i in xrange(len(src)):
|
||||||
|
temp1 = 0;
|
||||||
|
byteXorVal = 0;
|
||||||
|
for j in xrange(8):
|
||||||
|
temp1 ^= wkey[j]
|
||||||
|
sum2 = (sum2+j)*20021 + sum1
|
||||||
|
sum1 = (temp1*346)&0xFFFF
|
||||||
|
sum2 = (sum2+sum1)&0xFFFF
|
||||||
|
temp1 = (temp1*20021+1)&0xFFFF
|
||||||
|
byteXorVal ^= temp1 ^ sum2
|
||||||
|
curByte = ord(src[i])
|
||||||
|
if not decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
|
if decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
for j in xrange(8):
|
||||||
|
wkey[j] ^= keyXorVal;
|
||||||
|
dst+=chr(curByte)
|
||||||
|
return dst
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
self._ctx = [ctx1, ctx2]
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._key = None
|
||||||
|
self._iv = None
|
||||||
|
self.aes = None
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._key = userkey
|
||||||
|
self._iv = iv
|
||||||
|
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
iv = self._iv
|
||||||
|
cleartext = self.aes.decrypt(iv + data)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
|
||||||
|
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
|
try:
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
|
||||||
|
break
|
||||||
|
except (ImportError, Exception):
|
||||||
|
pass
|
||||||
|
return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
|
||||||
|
# this only exists in openssl so we will use pure python implementation instead
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
def pbkdf2(self, passwd, salt, iter, keylen):
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise Exception("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
return T
|
||||||
|
|
||||||
|
sha = hashlib.sha1
|
||||||
|
digest_size = sha().digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
h = hmac.new( passwd, None, sha )
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, iter, i )
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
|
||||||
Binary file not shown.
@@ -20,6 +20,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# Get a 7 bit encoded number from string. The most
|
# Get a 7 bit encoded number from string. The most
|
||||||
# significant byte comes first and has the high bit (8th) set
|
# significant byte comes first and has the high bit (8th) set
|
||||||
@@ -138,7 +140,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
@@ -243,6 +246,7 @@ class PageParser(object):
|
|||||||
'region.y' : (1, 'scalar_number', 0, 0),
|
'region.y' : (1, 'scalar_number', 0, 0),
|
||||||
'region.h' : (1, 'scalar_number', 0, 0),
|
'region.h' : (1, 'scalar_number', 0, 0),
|
||||||
'region.w' : (1, 'scalar_number', 0, 0),
|
'region.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.orientation' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'empty_text_region' : (1, 'snippets', 1, 0),
|
'empty_text_region' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
@@ -258,6 +262,13 @@ class PageParser(object):
|
|||||||
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
'word_semantic' : (1, 'snippets', 1, 1),
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -272,11 +283,21 @@ class PageParser(object):
|
|||||||
|
|
||||||
'_span' : (1, 'snippets', 1, 0),
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'-span.lastWord' : (1, 'scalar_number', 0, 0),
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'span' : (1, 'snippets', 1, 0),
|
'span' : (1, 'snippets', 1, 0),
|
||||||
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'extratokens' : (1, 'snippets', 1, 0),
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -595,28 +616,30 @@ class PageParser(object):
|
|||||||
nodename = fullpathname.pop()
|
nodename = fullpathname.pop()
|
||||||
ilvl = len(fullpathname)
|
ilvl = len(fullpathname)
|
||||||
indent = ' ' * (3 * ilvl)
|
indent = ' ' * (3 * ilvl)
|
||||||
result = indent + '<' + nodename + '>'
|
rlst = []
|
||||||
|
rlst.append(indent + '<' + nodename + '>')
|
||||||
if len(argList) > 0:
|
if len(argList) > 0:
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + ','
|
alst.append(str(j) + ',')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += 'snippets:' + argres
|
rlst.append('snippets:' + argres)
|
||||||
else :
|
else :
|
||||||
result += argres
|
rlst.append(argres)
|
||||||
if len(subtagList) > 0 :
|
if len(subtagList) > 0 :
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
result += indent + '</' + nodename + '>\n'
|
rlst.append(indent + '</' + nodename + '>\n')
|
||||||
else:
|
else:
|
||||||
result += '</' + nodename + '>\n'
|
rlst.append('</' + nodename + '>\n')
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# flatten tag
|
# flatten tag
|
||||||
@@ -625,35 +648,38 @@ class PageParser(object):
|
|||||||
subtagList = node[1]
|
subtagList = node[1]
|
||||||
argtype = node[2]
|
argtype = node[2]
|
||||||
argList = node[3]
|
argList = node[3]
|
||||||
result = name
|
rlst = []
|
||||||
|
rlst.append(name)
|
||||||
if (len(argList) > 0):
|
if (len(argList) > 0):
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + '|'
|
alst.append(str(j) + '|')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += '.snippets=' + argres
|
rlst.append('.snippets=' + argres)
|
||||||
else :
|
else :
|
||||||
result += '=' + argres
|
rlst.append('=' + argres)
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# reduce create xml output
|
# reduce create xml output
|
||||||
def formatDoc(self, flat_xml):
|
def formatDoc(self, flat_xml):
|
||||||
result = ''
|
rlst = []
|
||||||
for j in self.doc :
|
for j in self.doc :
|
||||||
if len(j) > 0:
|
if len(j) > 0:
|
||||||
if flat_xml:
|
if flat_xml:
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
else:
|
else:
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
|
result = "".join(rlst)
|
||||||
if self.debug : print result
|
if self.debug : print result
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf350
|
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
|
||||||
{\fonttbl}
|
{\fonttbl}
|
||||||
{\colortbl;\red255\green255\blue255;}
|
{\colortbl;\red255\green255\blue255;}
|
||||||
}
|
}
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 362 B After Width: | Height: | Size: 362 B |
@@ -43,4 +43,3 @@ def cli_main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.exit(cli_main())
|
sys.exit(cli_main())
|
||||||
|
|
||||||
@@ -59,8 +59,11 @@
|
|||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
# 0.19 - Modify the interface to allow use of import
|
# 0.19 - Modify the interface to allow use of import
|
||||||
# 0.20 - modify to allow use inside new interface for calibre plugins
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.20'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -140,11 +143,17 @@ logging.basicConfig()
|
|||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
raise ValueError('Invalid file format')
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
@@ -182,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -219,9 +228,15 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
|
if (sect.bkType == "Book"):
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
@@ -239,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -267,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
|
if drm_sub_version == 13:
|
||||||
encrypted_key = r[44:44+8]
|
encrypted_key = r[44:44+8]
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -356,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -368,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -389,7 +412,7 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -501,4 +524,3 @@ def main(argv=None):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -271,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -280,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -288,6 +292,11 @@ class DocParser(object):
|
|||||||
if self.fixedimage :
|
if self.fixedimage :
|
||||||
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
||||||
|
|
||||||
|
# before creating an image make sure glyph info exists
|
||||||
|
gidList = self.getData('info.glyph.glyphID',0,-1)
|
||||||
|
|
||||||
|
makeImage = makeImage & (len(gidList) > 0)
|
||||||
|
|
||||||
if not makeImage :
|
if not makeImage :
|
||||||
# standard all word paragraph
|
# standard all word paragraph
|
||||||
for wordnum in xrange(first, last):
|
for wordnum in xrange(first, last):
|
||||||
@@ -353,6 +362,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -512,13 +523,80 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
htmlpage = ''
|
tocinfo = ''
|
||||||
|
hlst = []
|
||||||
|
|
||||||
# get the ocr text
|
# get the ocr text
|
||||||
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
||||||
@@ -575,8 +653,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
# set anchor for link target on this page
|
# set anchor for link target on this page
|
||||||
if not anchorSet and not first_para_continued:
|
if not anchorSet and not first_para_continued:
|
||||||
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="'
|
hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
|
||||||
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n'
|
hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
|
||||||
anchorSet = True
|
anchorSet = True
|
||||||
|
|
||||||
# handle groups of graphics with text captions
|
# handle groups of graphics with text captions
|
||||||
@@ -585,12 +663,12 @@ class DocParser(object):
|
|||||||
if grptype != None:
|
if grptype != None:
|
||||||
if grptype == 'graphic':
|
if grptype == 'graphic':
|
||||||
gcstr = ' class="' + grptype + '"'
|
gcstr = ' class="' + grptype + '"'
|
||||||
htmlpage += '<div' + gcstr + '>'
|
hlst.append('<div' + gcstr + '>')
|
||||||
inGroup = True
|
inGroup = True
|
||||||
|
|
||||||
elif (etype == 'grpend'):
|
elif (etype == 'grpend'):
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '</div>\n'
|
hlst.append('</div>\n')
|
||||||
inGroup = False
|
inGroup = False
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -600,25 +678,25 @@ class DocParser(object):
|
|||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc)
|
hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
|
||||||
else:
|
else:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
elif regtype == 'chapterheading' :
|
elif regtype == 'chapterheading' :
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
if not breakSet:
|
if not breakSet:
|
||||||
htmlpage += '<div style="page-break-after: always;"> </div>\n'
|
hlst.append('<div style="page-break-after: always;"> </div>\n')
|
||||||
breakSet = True
|
breakSet = True
|
||||||
tag = 'h1'
|
tag = 'h1'
|
||||||
if pclass and (len(pclass) >= 7):
|
if pclass and (len(pclass) >= 7):
|
||||||
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
||||||
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
||||||
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
else:
|
else:
|
||||||
htmlpage += '<' + tag + '>'
|
hlst.append('<' + tag + '>')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
|
|
||||||
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -632,11 +710,11 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'tocentry') :
|
elif (regtype == 'tocentry') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -644,8 +722,8 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
tocinfo += self.buildTOCEntry(pdesc)
|
||||||
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'vertical') or (regtype == 'table') :
|
elif (regtype == 'vertical') or (regtype == 'table') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -655,13 +733,13 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
|
|
||||||
elif (regtype == 'synth_fcvr.center'):
|
elif (regtype == 'synth_fcvr.center'):
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
else :
|
else :
|
||||||
print ' Making region type', regtype,
|
print ' Making region type', regtype,
|
||||||
@@ -687,29 +765,29 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
else :
|
else :
|
||||||
print ' a "graphic" region'
|
print ' a "graphic" region'
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
|
|
||||||
|
htmlpage = "".join(hlst)
|
||||||
if last_para_continued :
|
if last_para_continued :
|
||||||
if htmlpage[-4:] == '</p>':
|
if htmlpage[-4:] == '</p>':
|
||||||
htmlpage = htmlpage[0:-4]
|
htmlpage = htmlpage[0:-4]
|
||||||
last_para_continued = False
|
last_para_continued = False
|
||||||
|
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
||||||
htmlpage = dp.process()
|
htmlpage, tocinfo = dp.process()
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|||||||
@@ -10,17 +10,94 @@ from struct import unpack
|
|||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
class PParser(object):
|
||||||
def __init__(self, gd, flatxml):
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
self.gd = gd
|
self.gd = gd
|
||||||
self.flatdoc = flatxml.split('\n')
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
self.temp = []
|
self.temp = []
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
self.ph = -1
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
self.pw = -1
|
||||||
self.pw = foo[0]
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
self.gx = self.getData('info.glyph.x')
|
for p in startpos:
|
||||||
self.gy = self.getData('info.glyph.y')
|
(name, argres) = self.lineinDoc(p)
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
self.ph = max(self.ph, int(argres))
|
||||||
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.pw = max(self.pw, int(argres))
|
||||||
|
|
||||||
|
if self.ph <= 0:
|
||||||
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
|
if self.pw <= 0:
|
||||||
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gx = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gy = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gid = res
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
|
# find tag in doc if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
|
else:
|
||||||
|
end = min(self.docSize, end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
|
||||||
|
# return list of start positions for the tagpath
|
||||||
|
def posinDoc(self, tagpath):
|
||||||
|
startpos = []
|
||||||
|
pos = 0
|
||||||
|
res = ""
|
||||||
|
while res != None :
|
||||||
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
|
if res != None :
|
||||||
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
def getData(self, path):
|
def getData(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.flatdoc)
|
cnt = len(self.flatdoc)
|
||||||
@@ -39,6 +116,23 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def getDataatPos(self, path, pos):
|
||||||
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
def getDataTemp(self, path):
|
def getDataTemp(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.temp)
|
cnt = len(self.temp)
|
||||||
@@ -58,6 +152,7 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getImages(self):
|
def getImages(self):
|
||||||
result = []
|
result = []
|
||||||
self.temp = self.flatdoc
|
self.temp = self.flatdoc
|
||||||
@@ -69,6 +164,7 @@ class PParser(object):
|
|||||||
src = self.getDataTemp('img.src')[0]
|
src = self.getDataTemp('img.src')[0]
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getGlyphs(self):
|
def getGlyphs(self):
|
||||||
result = []
|
result = []
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
@@ -84,68 +180,70 @@ class PParser(object):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
ml = ''
|
mlst = []
|
||||||
pp = PParser(gdict, flat_xml)
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
||||||
if (raw):
|
if (raw):
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
else:
|
else:
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
ml += '<script><![CDATA[\n'
|
mlst.append('<script><![CDATA[\n')
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
mlst.append('var dpi=%d;\n' % scaledpi)
|
||||||
if (counter) :
|
if (previd) :
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
||||||
if (counter < numfiles-1) :
|
if (nextid) :
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
||||||
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
|
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
||||||
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
|
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
||||||
ml += 'window.onload=setsize;\n'
|
mlst.append('window.onload=setsize;\n')
|
||||||
ml += ']]></script>\n'
|
mlst.append(']]></script>\n')
|
||||||
ml += '</head>\n'
|
mlst.append('</head>\n')
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
mlst.append('<div style="white-space:nowrap;">\n')
|
||||||
if (counter == 0) :
|
if previd == None:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else:
|
else:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
ml += '<defs>\n'
|
mlst.append('<defs>\n')
|
||||||
gdefs = pp.getGlyphs()
|
gdefs = pp.getGlyphs()
|
||||||
for j in xrange(0,len(gdefs)):
|
for j in xrange(0,len(gdefs)):
|
||||||
ml += gdefs[j]
|
mlst.append(gdefs[j])
|
||||||
ml += '</defs>\n'
|
mlst.append('</defs>\n')
|
||||||
img = pp.getImages()
|
img = pp.getImages()
|
||||||
if (img != None):
|
if (img != None):
|
||||||
for j in xrange(0,len(img)):
|
for j in xrange(0,len(img)):
|
||||||
ml += img[j]
|
mlst.append(img[j])
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
for j in xrange(0,len(pp.gid)):
|
for j in xrange(0,len(pp.gid)):
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
|
||||||
if (raw) :
|
if (raw) :
|
||||||
ml += '</svg>'
|
mlst.append('</svg>')
|
||||||
else :
|
else :
|
||||||
ml += '</svg></a>\n'
|
mlst.append('</svg></a>\n')
|
||||||
if (counter == numfiles - 1) :
|
if nextid == None:
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else :
|
else :
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '</div>\n'
|
mlst.append('</div>\n')
|
||||||
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
|
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
|
||||||
ml += '</body>\n'
|
mlst.append('</body>\n')
|
||||||
ml += '</html>\n'
|
mlst.append('</html>\n')
|
||||||
return ml
|
return "".join(mlst)
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
if 'calibre' in sys.modules:
|
if 'calibre' in sys.modules:
|
||||||
@@ -37,6 +39,8 @@ else :
|
|||||||
import flatxml2svg
|
import flatxml2svg
|
||||||
import stylexml2css
|
import stylexml2css
|
||||||
|
|
||||||
|
# global switch
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
# Get a 7 bit encoded number from a file
|
# Get a 7 bit encoded number from a file
|
||||||
def readEncodedNumber(file):
|
def readEncodedNumber(file):
|
||||||
@@ -114,7 +118,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
def getPos(self):
|
def getPos(self):
|
||||||
@@ -295,6 +300,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
if not os.path.exists(svgDir) :
|
if not os.path.exists(svgDir) :
|
||||||
os.makedirs(svgDir)
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xmlDir = os.path.join(bookDir,'xml')
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
if not os.path.exists(xmlDir) :
|
if not os.path.exists(xmlDir) :
|
||||||
os.makedirs(xmlDir)
|
os.makedirs(xmlDir)
|
||||||
@@ -345,23 +351,38 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
authors = authors.replace('>','>')
|
authors = authors.replace('>','>')
|
||||||
meta_array['Authors'] = authors
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'metadata.xml')
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
metastr = ''
|
mlst = []
|
||||||
for key in meta_array:
|
for key in meta_array:
|
||||||
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n'
|
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
|
||||||
|
metastr = "".join(mlst)
|
||||||
|
mlst = None
|
||||||
file(xname, 'wb').write(metastr)
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
print 'Processing StyleSheet'
|
print 'Processing StyleSheet'
|
||||||
|
|
||||||
# get some scaling info from metadata to use while processing styles
|
# get some scaling info from metadata to use while processing styles
|
||||||
|
# and first page info
|
||||||
|
|
||||||
fontsize = '135'
|
fontsize = '135'
|
||||||
if 'fontSize' in meta_array:
|
if 'fontSize' in meta_array:
|
||||||
fontsize = meta_array['fontSize']
|
fontsize = meta_array['fontSize']
|
||||||
|
|
||||||
# also get the size of a normal text page
|
# also get the size of a normal text page
|
||||||
|
# get the total number of pages unpacked as a safety check
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
spage = '1'
|
spage = '1'
|
||||||
if 'firstTextPage' in meta_array:
|
if 'firstTextPage' in meta_array:
|
||||||
spage = meta_array['firstTextPage']
|
spage = meta_array['firstTextPage']
|
||||||
pnum = int(spage)
|
pnum = int(spage)
|
||||||
|
if pnum >= numfiles or pnum < 0:
|
||||||
|
# metadata is wrong so just select a page near the front
|
||||||
|
# 10% of the book to get a normal text page
|
||||||
|
pnum = int(0.10 * numfiles)
|
||||||
|
# print "first normal text page is", spage
|
||||||
|
|
||||||
# get page height and width from first text page for use in stylesheet scaling
|
# get page height and width from first text page for use in stylesheet scaling
|
||||||
pname = 'page%04d.dat' % (pnum + 1)
|
pname = 'page%04d.dat' % (pnum + 1)
|
||||||
@@ -371,12 +392,37 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
(ph, pw) = getPageDim(flat_xml)
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
if (ph == '-1') or (ph == '0') : ph = '11000'
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
if (pw == '-1') or (pw == '0') : pw = '8500'
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
# print ' ', 'other0000.dat'
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
xname = os.path.join(bookDir, 'style.css')
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
flat_xml = convert2xml.fromData(dict, otherFile)
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
file(xname, 'wb').write(cssstr)
|
file(xname, 'wb').write(cssstr)
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'other0000.xml')
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
@@ -398,6 +444,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
fname = os.path.join(glyphsDir,filename)
|
fname = os.path.join(glyphsDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
@@ -414,108 +461,188 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
glyfile.close()
|
glyfile.close()
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
|
||||||
# start up the html
|
# start up the html
|
||||||
|
# also build up tocentries while processing html
|
||||||
htmlFileName = "book.html"
|
htmlFileName = "book.html"
|
||||||
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
hlst = []
|
||||||
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n'
|
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
|
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
|
||||||
htmlstr += '<head>\n'
|
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
|
||||||
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
|
hlst.append('<head>\n')
|
||||||
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
|
||||||
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
|
||||||
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
|
||||||
htmlstr += '</head>\n<body>\n'
|
hlst.append('</head>\n<body>\n')
|
||||||
|
|
||||||
print 'Processing Pages'
|
print 'Processing Pages'
|
||||||
# Books are at 1440 DPI. This is rendering at twice that size for
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
# readability when rendering to the screen.
|
# readability when rendering to the screen.
|
||||||
scaledpi = 1440.0
|
scaledpi = 1440.0
|
||||||
|
|
||||||
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
|
||||||
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
|
||||||
svgindex += '<head>\n'
|
|
||||||
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
|
||||||
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
|
||||||
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
|
||||||
if 'ASIN' in meta_array:
|
|
||||||
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
|
||||||
if 'GUID' in meta_array:
|
|
||||||
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
|
||||||
svgindex += '</head>\n'
|
|
||||||
svgindex += '<body>\n'
|
|
||||||
|
|
||||||
filenames = os.listdir(pageDir)
|
filenames = os.listdir(pageDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
numfiles = len(filenames)
|
numfiles = len(filenames)
|
||||||
counter = 0
|
|
||||||
|
xmllst = []
|
||||||
|
elst = []
|
||||||
|
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
# print ' ', filename
|
# print ' ', filename
|
||||||
print ".",
|
print ".",
|
||||||
|
|
||||||
fname = os.path.join(pageDir,filename)
|
fname = os.path.join(pageDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
# first get the html
|
# first get the html
|
||||||
htmlstr += flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
elst.append(tocinfo)
|
||||||
|
hlst.append(pagehtml)
|
||||||
|
|
||||||
# now get the svg image of the page
|
# finish up the html string and output it
|
||||||
svgxml = flatxml2svg.convert2SVG(gd, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi)
|
hlst.append('</body>\n</html>\n')
|
||||||
|
htmlstr = "".join(hlst)
|
||||||
|
hlst = None
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tlst = []
|
||||||
|
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
tlst.append('<head>\n')
|
||||||
|
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
tlst.append('</head>\n')
|
||||||
|
tlst.append('<body>\n')
|
||||||
|
|
||||||
|
tlst.append('<h2>Table of Contents</h2>\n')
|
||||||
|
start = pageidnums[0]
|
||||||
if (raw):
|
if (raw):
|
||||||
pfile = open(os.path.join(svgDir,filename.replace('.dat','.svg')), 'w')
|
startname = 'page%04d.svg' % start
|
||||||
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (counter, counter)
|
|
||||||
else:
|
else:
|
||||||
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % counter), 'w')
|
startname = 'page%04d.xhtml' % start
|
||||||
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (counter, counter)
|
|
||||||
|
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
tocentries = "".join(elst)
|
||||||
|
elst = None
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
|
||||||
|
tlst.append('</body>\n')
|
||||||
|
tlst.append('</html>\n')
|
||||||
|
tochtml = "".join(tlst)
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
slst = []
|
||||||
|
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
slst.append('<head>\n')
|
||||||
|
slst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
slst.append('</head>\n')
|
||||||
|
slst.append('<body>\n')
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
slst.append('<h2>List of Pages</h2>\n')
|
||||||
|
slst.append('<div>\n')
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flst = []
|
||||||
|
for page in pagelst:
|
||||||
|
flst.append(xmllst[page])
|
||||||
|
flat_svg = "".join(flst)
|
||||||
|
flst=None
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
|
if (raw) :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
|
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
else :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
|
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
previd = pageid
|
||||||
pfile.write(svgxml)
|
pfile.write(svgxml)
|
||||||
pfile.close()
|
pfile.close()
|
||||||
|
|
||||||
counter += 1
|
counter += 1
|
||||||
|
slst.append('</div>\n')
|
||||||
|
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
|
||||||
|
slst.append('</body>\n</html>\n')
|
||||||
|
svgindex = "".join(slst)
|
||||||
|
slst = None
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
# finish up the html string and output it
|
|
||||||
htmlstr += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
|
||||||
|
|
||||||
# finish up the svg index string and output it
|
|
||||||
svgindex += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
|
||||||
|
|
||||||
# build the opf file
|
# build the opf file
|
||||||
opfname = os.path.join(bookDir, 'book.opf')
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
olst = []
|
||||||
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
|
||||||
# adding metadata
|
# adding metadata
|
||||||
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
|
||||||
if 'oASIN' in meta_array:
|
if 'oASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
|
||||||
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
|
||||||
opfstr += ' <dc:language>en</dc:language>\n'
|
olst.append(' <dc:language>en</dc:language>\n')
|
||||||
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n'
|
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <meta name="cover" content="bookcover"/>\n'
|
olst.append(' <meta name="cover" content="bookcover"/>\n')
|
||||||
opfstr += ' </metadata>\n'
|
olst.append(' </metadata>\n')
|
||||||
opfstr += '<manifest>\n'
|
olst.append('<manifest>\n')
|
||||||
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
|
||||||
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n'
|
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
|
||||||
# adding image files to manifest
|
# adding image files to manifest
|
||||||
filenames = os.listdir(imgDir)
|
filenames = os.listdir(imgDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
@@ -525,17 +652,19 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
imgext = 'jpeg'
|
imgext = 'jpeg'
|
||||||
if imgext == '.svg':
|
if imgext == '.svg':
|
||||||
imgext = 'svg+xml'
|
imgext = 'svg+xml'
|
||||||
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n'
|
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n'
|
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
|
||||||
opfstr += '</manifest>\n'
|
olst.append('</manifest>\n')
|
||||||
# adding spine
|
# adding spine
|
||||||
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n'
|
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <guide>\n'
|
olst.append(' <guide>\n')
|
||||||
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n'
|
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
|
||||||
opfstr += ' </guide>\n'
|
olst.append(' </guide>\n')
|
||||||
opfstr += '</package>\n'
|
olst.append('</package>\n')
|
||||||
|
opfstr = "".join(olst)
|
||||||
|
olst = None
|
||||||
file(opfname, 'wb').write(opfstr)
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
print 'Processing Complete'
|
print 'Processing Complete'
|
||||||
@@ -556,7 +685,6 @@ def usage():
|
|||||||
|
|
||||||
def main(argv):
|
def main(argv):
|
||||||
bookDir = ''
|
bookDir = ''
|
||||||
|
|
||||||
if len(argv) == 0:
|
if len(argv) == 0:
|
||||||
argv = sys.argv
|
argv = sys.argv
|
||||||
|
|
||||||
@@ -573,7 +701,7 @@ def main(argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
raw = 0
|
raw = 0
|
||||||
fixedimage = False
|
fixedimage = True
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o =="-h":
|
if o =="-h":
|
||||||
usage()
|
usage()
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ from __future__ import with_statement
|
|||||||
# and many many others
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
__version__ = '3.1'
|
__version__ = '4.2'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -32,6 +32,9 @@ import sys
|
|||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import string
|
import string
|
||||||
import re
|
import re
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -74,9 +77,11 @@ def cleanup_name(name):
|
|||||||
return one
|
return one
|
||||||
|
|
||||||
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
global buildXML
|
||||||
|
|
||||||
# handle the obvious cases at the beginning
|
# handle the obvious cases at the beginning
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
print "Error: Input file does not exist"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
mobi = True
|
mobi = True
|
||||||
@@ -95,8 +100,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
print "Processing Book: ", title
|
print "Processing Book: ", title
|
||||||
filenametitle = cleanup_name(title)
|
filenametitle = cleanup_name(title)
|
||||||
outfilename = bookname
|
outfilename = bookname
|
||||||
if len(bookname)>4 and len(filenametitle)>4 and bookname[:4] != filenametitle[:4]:
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
outfilename = outfilename + "_" + filenametitle
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
# build pid list
|
# build pid list
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
@@ -106,16 +117,21 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
mb.processBook(pidlst)
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
except mobidedrm.DrmException, e:
|
except mobidedrm.DrmException, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except topazextract.TpzDRMError, e:
|
except topazextract.TpzDRMError, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if mobi:
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
elif mb.getMobiVersion() >= 8:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw3')
|
||||||
|
else:
|
||||||
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
mb.getMobiFile(outfile)
|
mb.getMobiFile(outfile)
|
||||||
return 0
|
return 0
|
||||||
@@ -125,10 +141,11 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
mb.getHTMLZip(zipname)
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG HTMLZ Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
mb.getSVGZip(zipname)
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
mb.getXMLZip(zipname)
|
mb.getXMLZip(zipname)
|
||||||
@@ -158,7 +175,6 @@ def main(argv=sys.argv):
|
|||||||
print ('K4MobiDeDrm v%(__version__)s '
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
print ' '
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -196,4 +212,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
# standlone set of Mac OSX specific routines needed for K4DeDRM
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
@@ -23,6 +25,25 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('libcrypto not found')
|
raise DrmException('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
# From OpenSSL's crypto aes header
|
||||||
|
#
|
||||||
|
# AES_ENCRYPT 1
|
||||||
|
# AES_DECRYPT 0
|
||||||
|
# AES_MAXNR 14 (in bytes)
|
||||||
|
# AES_BLOCK_SIZE 16 (in bytes)
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
# note: the ivec string, and output buffer are both mutable
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
c_char_pp = POINTER(c_char_p)
|
c_char_pp = POINTER(c_char_p)
|
||||||
c_int_p = POINTER(c_int)
|
c_int_p = POINTER(c_int)
|
||||||
@@ -41,6 +62,12 @@ def _load_crypto_libcrypto():
|
|||||||
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
# From OpenSSL's Crypto evp/p5_crpt2.c
|
||||||
|
#
|
||||||
|
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
|
||||||
|
# const unsigned char *salt, int saltlen, int iter,
|
||||||
|
# int keylen, unsigned char *out);
|
||||||
|
|
||||||
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
|
||||||
@@ -48,7 +75,7 @@ def _load_crypto_libcrypto():
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._blocksize = 0
|
self._blocksize = 0
|
||||||
self._keyctx = None
|
self._keyctx = None
|
||||||
self.iv = 0
|
self._iv = 0
|
||||||
|
|
||||||
def set_decrypt_key(self, userkey, iv):
|
def set_decrypt_key(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
@@ -56,23 +83,24 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('AES improper key used')
|
raise DrmException('AES improper key used')
|
||||||
return
|
return
|
||||||
keyctx = self._keyctx = AES_KEY()
|
keyctx = self._keyctx = AES_KEY()
|
||||||
self.iv = iv
|
self._iv = iv
|
||||||
|
self._userkey = userkey
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise DrmException('Failed to initialize AES key')
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
keyctx = self._keyctx
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise DrmException('AES decryption failed')
|
raise DrmException('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
def keyivgen(self, passwd, salt):
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
saltlen = len(salt)
|
saltlen = len(salt)
|
||||||
passlen = len(passwd)
|
passlen = len(passwd)
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
out = create_string_buffer(keylen)
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
return out.raw
|
return out.raw
|
||||||
@@ -114,8 +142,13 @@ def SHA256(message):
|
|||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
def encode(data, map):
|
def encode(data, map):
|
||||||
@@ -144,7 +177,7 @@ def decode(data,map):
|
|||||||
result += pack("B",value)
|
result += pack("B",value)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For K4M 1.6.X and later
|
||||||
# generate table of prime number less than or equal to int n
|
# generate table of prime number less than or equal to int n
|
||||||
def primes(n):
|
def primes(n):
|
||||||
if n==2: return [2]
|
if n==2: return [2]
|
||||||
@@ -166,7 +199,6 @@ def primes(n):
|
|||||||
return [2]+[x for x in s if x]
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
||||||
# returns with the serial number of drive whose BSD Name is "disk0"
|
# returns with the serial number of drive whose BSD Name is "disk0"
|
||||||
def GetVolumeSerialNumber():
|
def GetVolumeSerialNumber():
|
||||||
@@ -196,20 +228,234 @@ def GetVolumeSerialNumber():
|
|||||||
foundIt = True
|
foundIt = True
|
||||||
break
|
break
|
||||||
if not foundIt:
|
if not foundIt:
|
||||||
sernum = '9999999999'
|
sernum = ''
|
||||||
return sernum
|
return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
dpath = home + '/Library/Application Support/Kindle'
|
||||||
|
cmdline = '/sbin/mount'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
disk = ''
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.startswith('/dev'):
|
||||||
|
(devpart, mpath) = resline.split(' on ')
|
||||||
|
dpart = devpart[5:]
|
||||||
|
pp = mpath.find('(')
|
||||||
|
if pp >= 0:
|
||||||
|
mpath = mpath[:pp-1]
|
||||||
|
if dpath.startswith(mpath):
|
||||||
|
disk = dpart
|
||||||
|
return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
|
||||||
|
def GetDiskPartitionUUID(diskpart):
|
||||||
|
uuidnum = os.getenv('MYUUIDNUMBER')
|
||||||
|
if uuidnum != None:
|
||||||
|
return uuidnum
|
||||||
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
bsdname = None
|
||||||
|
uuidnum = None
|
||||||
|
foundIt = False
|
||||||
|
nest = 0
|
||||||
|
uuidnest = -1
|
||||||
|
partnest = -2
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.find('{') >= 0:
|
||||||
|
nest += 1
|
||||||
|
if resline.find('}') >= 0:
|
||||||
|
nest -= 1
|
||||||
|
pp = resline.find('"UUID" = "')
|
||||||
|
if pp >= 0:
|
||||||
|
uuidnum = resline[pp+10:-1]
|
||||||
|
uuidnum = uuidnum.strip()
|
||||||
|
uuidnest = nest
|
||||||
|
if partnest == uuidnest and uuidnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
bb = resline.find('"BSD Name" = "')
|
||||||
|
if bb >= 0:
|
||||||
|
bsdname = resline[bb+14:-1]
|
||||||
|
bsdname = bsdname.strip()
|
||||||
|
if (bsdname == diskpart):
|
||||||
|
partnest = nest
|
||||||
|
else :
|
||||||
|
partnest = -2
|
||||||
|
if partnest == uuidnest and partnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if nest == 0:
|
||||||
|
partnest = -2
|
||||||
|
uuidnest = -1
|
||||||
|
uuidnum = None
|
||||||
|
bsdname = None
|
||||||
|
if not foundIt:
|
||||||
|
uuidnum = ''
|
||||||
|
return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
|
||||||
|
macnum = os.getenv('MYMACNUM')
|
||||||
|
if macnum != None:
|
||||||
|
return macnum
|
||||||
|
cmdline = '/sbin/ifconfig en0'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
macnum = None
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
pp = resline.find('ether ')
|
||||||
|
if pp >= 0:
|
||||||
|
macnum = resline[pp+6:-1]
|
||||||
|
macnum = macnum.strip()
|
||||||
|
# print "original mac", macnum
|
||||||
|
# now munge it up the way Kindle app does
|
||||||
|
# by xoring it with 0xa5 and swapping elements 3 and 4
|
||||||
|
maclst = macnum.split(':')
|
||||||
|
n = len(maclst)
|
||||||
|
if n != 6:
|
||||||
|
fountIt = False
|
||||||
|
break
|
||||||
|
for i in range(6):
|
||||||
|
maclst[i] = int('0x' + maclst[i], 0)
|
||||||
|
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
|
||||||
|
mlst[5] = maclst[5] ^ 0xa5
|
||||||
|
mlst[4] = maclst[3] ^ 0xa5
|
||||||
|
mlst[3] = maclst[4] ^ 0xa5
|
||||||
|
mlst[2] = maclst[2] ^ 0xa5
|
||||||
|
mlst[1] = maclst[1] ^ 0xa5
|
||||||
|
mlst[0] = maclst[0] ^ 0xa5
|
||||||
|
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if not foundIt:
|
||||||
|
macnum = ''
|
||||||
|
return macnum
|
||||||
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
# uses unix env to get username instead of using sysctlbyname
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
username = os.getenv('USER')
|
username = os.getenv('USER')
|
||||||
return username
|
return username
|
||||||
|
|
||||||
|
def isNewInstall():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
# soccer game fan anyone
|
||||||
|
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
|
||||||
|
# print dpath, os.path.exists(dpath)
|
||||||
|
if os.path.exists(dpath):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
# K4Mac now has an extensive set of ids strings it uses
|
||||||
|
# in encoding pids and in creating unique passwords
|
||||||
|
# for use in its own version of CryptUnprotectDataV2
|
||||||
|
|
||||||
|
# BUT Amazon has now become nasty enough to detect when its app
|
||||||
|
# is being run under a debugger and actually changes code paths
|
||||||
|
# including which one of these strings is chosen, all to try
|
||||||
|
# to prevent reverse engineering
|
||||||
|
|
||||||
|
# Sad really ... they will only hurt their own sales ...
|
||||||
|
# true book lovers really want to keep their books forever
|
||||||
|
# and move them to their devices and DRM prevents that so they
|
||||||
|
# will just buy from someplace else that they can remove
|
||||||
|
# the DRM from
|
||||||
|
|
||||||
|
# Amazon should know by now that true book lover's are not like
|
||||||
|
# penniless kids that pirate music, we do not pirate books
|
||||||
|
|
||||||
|
if isNewInstall():
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if len(sernum) > 7:
|
||||||
|
return sernum
|
||||||
|
diskpart = GetUserHomeAppSupKindleDirParitionName()
|
||||||
|
uuidnum = GetDiskPartitionUUID(diskpart)
|
||||||
|
if len(uuidnum) > 7:
|
||||||
|
return uuidnum
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
def CryptUnprotectData(encryptedData, salt):
|
# used by Kindle for Mac versions < 1.6.0
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
class CryptUnprotectData(object):
|
||||||
|
def __init__(self):
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if sernum == '':
|
||||||
|
sernum = '9999999999'
|
||||||
|
sp = sernum + '!@#' + GetUserName()
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
passwdData = encode(SHA256(sp),charMap1)
|
||||||
|
salt = '16743'
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x3e8
|
||||||
|
keylen = 0x80
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext,charMap1)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.6.0
|
||||||
|
class CryptUnprotectDataV2(object):
|
||||||
|
def __init__(self):
|
||||||
|
sp = GetUserName() + ':&%:' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap5)
|
||||||
|
# salt generation as per the code
|
||||||
|
salt = 0x0512981d * 2 * 1 * 1
|
||||||
|
salt = str(salt) + GetUserName()
|
||||||
|
salt = encode(salt,charMap5)
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap5)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
|
||||||
|
# used in Kindle for Mac Version >= 1.9.0
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
crp = LibCrypto()
|
crp = LibCrypto()
|
||||||
key_iv = crp.keyivgen(passwdData, salt)
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
key = key_iv[0:32]
|
key = key_iv[0:32]
|
||||||
iv = key_iv[32:48]
|
iv = key_iv[32:48]
|
||||||
crp.set_decrypt_key(key,iv)
|
crp.set_decrypt_key(key,iv)
|
||||||
@@ -217,6 +463,27 @@ def CryptUnprotectData(encryptedData, salt):
|
|||||||
return cleartext
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.9.0
|
||||||
|
class CryptUnprotectDataV3(object):
|
||||||
|
def __init__(self, entropy):
|
||||||
|
sp = GetUserName() + '+@#$%+' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap2)
|
||||||
|
salt = entropy
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap2)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# Locate the .kindle-info files
|
# Locate the .kindle-info files
|
||||||
def getKindleInfoFiles(kInfoFiles):
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
# first search for current .kindle-info files
|
# first search for current .kindle-info files
|
||||||
@@ -232,18 +499,26 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
if os.path.isfile(resline):
|
if os.path.isfile(resline):
|
||||||
kInfoFiles.append(resline)
|
kInfoFiles.append(resline)
|
||||||
found = True
|
found = True
|
||||||
# For Future Reference
|
# add any .rainier*-kinf files
|
||||||
#
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
|
||||||
# # add any .kinf files
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
# cmdline = 'find "' + home + '/Library/Application Support" -name "rainier*.kinf"'
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
# cmdline = cmdline.encode(sys.getfilesystemencoding())
|
out1, out2 = p1.communicate()
|
||||||
# p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
reslst = out1.split('\n')
|
||||||
# out1, out2 = p1.communicate()
|
for resline in reslst:
|
||||||
# reslst = out1.split('\n')
|
if os.path.isfile(resline):
|
||||||
# for resline in reslst:
|
kInfoFiles.append(resline)
|
||||||
# if os.path.isfile(resline):
|
found = True
|
||||||
# kInfoFiles.append(resline)
|
# add any .kinf2011 files
|
||||||
# found = True
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
if not found:
|
if not found:
|
||||||
print('No kindle-info files have been found.')
|
print('No kindle-info files have been found.')
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
@@ -251,7 +526,7 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -261,6 +536,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
if data.find('[') != -1 :
|
if data.find('[') != -1 :
|
||||||
|
|
||||||
# older style kindle-info file
|
# older style kindle-info file
|
||||||
|
cud = CryptUnprotectData()
|
||||||
items = data.split('[')
|
items = data.split('[')
|
||||||
for item in items:
|
for item in items:
|
||||||
if item != '':
|
if item != '':
|
||||||
@@ -273,84 +549,177 @@ def getDBfromFile(kInfoFile):
|
|||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
encryptedValue = decode(rawdata,charMap2)
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
salt = '16743'
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
cleartext = CryptUnprotectData(encryptedValue, salt)
|
DB[keyname] = cleartext
|
||||||
DB[keyname] = decode(cleartext,charMap1)
|
|
||||||
cnt = cnt + 1
|
cnt = cnt + 1
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# For Future Reference taken from K4PC 1.5.0 .kinf
|
if hdr == '/':
|
||||||
#
|
|
||||||
# # else newer style .kinf file
|
# else newer style .kinf file used by K4Mac >= 1.6.0
|
||||||
# # the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# # so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
# data = data[:-1]
|
data = data[:-1]
|
||||||
# items = data.split('/')
|
items = data.split('/')
|
||||||
#
|
cud = CryptUnprotectDataV2()
|
||||||
# # loop through the item records until all are processed
|
|
||||||
# while len(items) > 0:
|
# loop through the item records until all are processed
|
||||||
#
|
while len(items) > 0:
|
||||||
# # get the first item record
|
|
||||||
# item = items.pop(0)
|
# get the first item record
|
||||||
#
|
item = items.pop(0)
|
||||||
# # the first 32 chars of the first record of a group
|
|
||||||
# # is the MD5 hash of the key name encoded by charMap5
|
# the first 32 chars of the first record of a group
|
||||||
# keyhash = item[0:32]
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
#
|
keyhash = item[0:32]
|
||||||
# # the raw keyhash string is also used to create entropy for the actual
|
keyname = "unknown"
|
||||||
# # CryptProtectData Blob that represents that keys contents
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
# "entropy" not used for K4Mac only K4PC
|
||||||
# entropy = SHA1(keyhash)
|
# entropy = SHA1(keyhash)
|
||||||
#
|
|
||||||
# # the remainder of the first record when decoded with charMap5
|
# the remainder of the first record when decoded with charMap5
|
||||||
# # has the ':' split char followed by the string representation
|
# has the ':' split char followed by the string representation
|
||||||
# # of the number of records that follow
|
# of the number of records that follow
|
||||||
# # and make up the contents
|
# and make up the contents
|
||||||
# srcnt = decode(item[34:],charMap5)
|
srcnt = decode(item[34:],charMap5)
|
||||||
# rcnt = int(srcnt)
|
rcnt = int(srcnt)
|
||||||
#
|
|
||||||
# # read and store in rcnt records of data
|
# read and store in rcnt records of data
|
||||||
# # that make up the contents value
|
# that make up the contents value
|
||||||
# edlst = []
|
edlst = []
|
||||||
# for i in xrange(rcnt):
|
for i in xrange(rcnt):
|
||||||
# item = items.pop(0)
|
item = items.pop(0)
|
||||||
# edlst.append(item)
|
edlst.append(item)
|
||||||
#
|
|
||||||
# keyname = "unknown"
|
keyname = "unknown"
|
||||||
# for name in names:
|
for name in names:
|
||||||
# if encodeHash(name,charMap5) == keyhash:
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
# keyname = name
|
keyname = name
|
||||||
# break
|
break
|
||||||
# if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
# keyname = keyhash
|
keyname = keyhash
|
||||||
#
|
|
||||||
# # the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# # of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# # to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
# # working properly, and thereby preventing the ensuing
|
# working properly, and thereby preventing the ensuing
|
||||||
# # CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
#
|
|
||||||
# # The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# # len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
# # (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
#
|
|
||||||
# # move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
# encdata = "".join(edlst)
|
encdata = "".join(edlst)
|
||||||
# contlen = len(encdata)
|
contlen = len(encdata)
|
||||||
# noffset = contlen - primes(int(contlen/3))[-1]
|
|
||||||
#
|
# now properly split and recombine
|
||||||
# # now properly split and recombine
|
# by moving noffset chars from the start of the
|
||||||
# # by moving noffset chars from the start of the
|
# string to the end of the string
|
||||||
# # string to the end of the string
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
# pfx = encdata[0:noffset]
|
pfx = encdata[0:noffset]
|
||||||
# encdata = encdata[noffset:]
|
encdata = encdata[noffset:]
|
||||||
# encdata = encdata + pfx
|
encdata = encdata + pfx
|
||||||
#
|
|
||||||
# # decode using Map5 to get the CryptProtect Data
|
# decode using charMap5 to get the CryptProtect Data
|
||||||
# encryptedValue = decode(encdata,charMap5)
|
encryptedValue = decode(encdata,charMap5)
|
||||||
# DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
# cnt = cnt + 1
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# the latest .kinf2011 version for K4M 1.9.1
|
||||||
|
# put back the hdr char, it is needed
|
||||||
|
data = hdr + data
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# the headerblob is the encrypted information needed to build the entropy string
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, charMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
|
||||||
|
# now extract the pieces in the same way
|
||||||
|
# this version is different from K4PC it scales the build number by multipying by 735
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
|
||||||
|
|
||||||
|
cud = CryptUnprotectDataV3(entropy)
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# unlike K4PC the keyhash is not used in generating entropy
|
||||||
|
# entropy = SHA1(keyhash) + added_entropy
|
||||||
|
# entropy = added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using testMap8 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
# print keyname
|
||||||
|
# print cleartext
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys, os
|
import sys, os, re
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
@@ -11,9 +11,7 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
|||||||
string_at, Structure, c_void_p, cast
|
string_at, Structure, c_void_p, cast
|
||||||
|
|
||||||
import _winreg as winreg
|
import _winreg as winreg
|
||||||
|
|
||||||
MAX_PATH = 255
|
MAX_PATH = 255
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
kernel32 = windll.kernel32
|
||||||
advapi32 = windll.advapi32
|
advapi32 = windll.advapi32
|
||||||
crypt32 = windll.crypt32
|
crypt32 = windll.crypt32
|
||||||
@@ -33,6 +31,32 @@ def SHA1(message):
|
|||||||
ctx.update(message)
|
ctx.update(message)
|
||||||
return ctx.digest()
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# For K4PC 1.9.X
|
||||||
|
# use routines in alfcrypto:
|
||||||
|
# AES_cbc_encrypt
|
||||||
|
# AES_set_decrypt_key
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1
|
||||||
|
|
||||||
|
from alfcrypto import AES_CBC, KeyIVGen
|
||||||
|
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
|
key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
|
||||||
|
key = key_iv[0:32]
|
||||||
|
iv = key_iv[32:48]
|
||||||
|
aes=AES_CBC()
|
||||||
|
aes.set_decrypt_key(key, iv)
|
||||||
|
cleartext = aes.decrypt(encryptedData)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# simple primes table (<= n) calculator
|
# simple primes table (<= n) calculator
|
||||||
def primes(n):
|
def primes(n):
|
||||||
@@ -59,6 +83,10 @@ def primes(n):
|
|||||||
# Probably supposed to act as obfuscation
|
# Probably supposed to act as obfuscation
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# New maps in K4PC 1.9.0
|
||||||
|
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -122,6 +150,9 @@ def GetVolumeSerialNumber():
|
|||||||
return GetVolumeSerialNumber
|
return GetVolumeSerialNumber
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
return GetVolumeSerialNumber()
|
||||||
|
|
||||||
def getLastError():
|
def getLastError():
|
||||||
GetLastError = kernel32.GetLastError
|
GetLastError = kernel32.GetLastError
|
||||||
GetLastError.argtypes = None
|
GetLastError.argtypes = None
|
||||||
@@ -162,7 +193,8 @@ def CryptUnprotectData():
|
|||||||
outdata = DataBlob()
|
outdata = DataBlob()
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
None, None, flags, byref(outdata)):
|
None, None, flags, byref(outdata)):
|
||||||
raise DrmException("Failed to Unprotect Data")
|
# raise DrmException("Failed to Unprotect Data")
|
||||||
|
return 'failed'
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
return CryptUnprotectData
|
return CryptUnprotectData
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
@@ -173,6 +205,13 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
|
|
||||||
|
# some 64 bit machines do not have the proper registry key for some reason
|
||||||
|
# or the pythonn interface to the 32 vs 64 bit registry is broken
|
||||||
|
if 'LOCALAPPDATA' in os.environ.keys():
|
||||||
|
path = os.environ['LOCALAPPDATA']
|
||||||
|
|
||||||
|
print "searching for kinfoFiles in ", path
|
||||||
|
|
||||||
# first look for older kindle-info files
|
# first look for older kindle-info files
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
@@ -181,18 +220,34 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
||||||
|
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
print('No .kinf files have not been found.')
|
print('No K4PC 1.5.X .kinf files have not been found.')
|
||||||
else:
|
else:
|
||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.6.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.9.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -220,7 +275,8 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# else newer style .kinf file
|
if hdr == '/':
|
||||||
|
# else rainier-2-1-1 .kinf file
|
||||||
# the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
data = data[:-1]
|
data = data[:-1]
|
||||||
@@ -236,7 +292,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# is the MD5 hash of the key name encoded by charMap5
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
keyhash = item[0:32]
|
keyhash = item[0:32]
|
||||||
|
|
||||||
# the raw keyhash string is also used to create entropy for the actual
|
# the raw keyhash string is used to create entropy for the actual
|
||||||
# CryptProtectData Blob that represents that keys contents
|
# CryptProtectData Blob that represents that keys contents
|
||||||
entropy = SHA1(keyhash)
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
@@ -261,7 +317,6 @@ def getDBfromFile(kInfoFile):
|
|||||||
break
|
break
|
||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
|
|
||||||
# the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
@@ -269,7 +324,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
# The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
# (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
# move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
@@ -293,4 +348,85 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
|
# else newest .kinf2011 style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
# need to put back the first char read because it it part
|
||||||
|
# of the added entropy blob
|
||||||
|
data = hdr + data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# starts with and encoded and encrypted header blob
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, testMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
# now extract the pieces that form the added entropy
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
added_entropy = m.group(2) + m.group(4)
|
||||||
|
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the sha1 of raw keyhash string is used to create entropy along
|
||||||
|
# with the added entropy provided above from the headerblob
|
||||||
|
entropy = SHA1(keyhash) + added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
# key names now use the new testMap8 encoding
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using new testMap8 to get the original CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -22,16 +22,16 @@ else:
|
|||||||
|
|
||||||
if inCalibre:
|
if inCalibre:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
else:
|
else:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
@@ -218,14 +218,14 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
|
|||||||
print "Keys not found in " + kInfoFile
|
print "Keys not found in " + kInfoFile
|
||||||
return pidlst
|
return pidlst
|
||||||
|
|
||||||
# Get the HDD serial
|
# Get the ID string used
|
||||||
encodedSystemVolumeSerialNumber = encodeHash(GetVolumeSerialNumber(),charMap1)
|
encodedIDString = encodeHash(GetIDString(),charMap1)
|
||||||
|
|
||||||
# Get the current user name
|
# Get the current user name
|
||||||
encodedUsername = encodeHash(GetUserName(),charMap1)
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
# concat, hash and encode to calculate the DSN
|
# concat, hash and encode to calculate the DSN
|
||||||
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
|
||||||
|
|
||||||
# Compute the device PID (for which I can tell, is used for nothing).
|
# Compute the device PID (for which I can tell, is used for nothing).
|
||||||
table = generatePidEncryptionTable()
|
table = generatePidEncryptionTable()
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
@@ -49,11 +49,16 @@
|
|||||||
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
# included in the encryption were wrong. They aren't for DOC compressed
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
# files, but they are for HUFF/CDIC compress files!
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
# 0.33 - Performance improvements for large files (concatenation)
|
||||||
|
# 0.34 - Performance improvements in decryption (libalfcrypto)
|
||||||
|
# 0.35 - add interface to get mobi_version
|
||||||
|
|
||||||
__version__ = '0.30'
|
__version__ = '0.35'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -70,6 +75,7 @@ sys.stdout=Unbuffered(sys.stdout)
|
|||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import binascii
|
import binascii
|
||||||
|
from alfcrypto import Pukall_Cipher
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,36 +87,37 @@ class DrmException(Exception):
|
|||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
return Pukall_Cipher().PC1(key,src,decryption)
|
||||||
sum2 = 0;
|
# sum1 = 0;
|
||||||
keyXorVal = 0;
|
# sum2 = 0;
|
||||||
if len(key)!=16:
|
# keyXorVal = 0;
|
||||||
print "Bad key length!"
|
# if len(key)!=16:
|
||||||
return None
|
# print "Bad key length!"
|
||||||
wkey = []
|
# return None
|
||||||
for i in xrange(8):
|
# wkey = []
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
# for i in xrange(8):
|
||||||
dst = ""
|
# wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
for i in xrange(len(src)):
|
# dst = ""
|
||||||
temp1 = 0;
|
# for i in xrange(len(src)):
|
||||||
byteXorVal = 0;
|
# temp1 = 0;
|
||||||
for j in xrange(8):
|
# byteXorVal = 0;
|
||||||
temp1 ^= wkey[j]
|
# for j in xrange(8):
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
# temp1 ^= wkey[j]
|
||||||
sum1 = (temp1*346)&0xFFFF
|
# sum2 = (sum2+j)*20021 + sum1
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
# sum1 = (temp1*346)&0xFFFF
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
# sum2 = (sum2+sum1)&0xFFFF
|
||||||
byteXorVal ^= temp1 ^ sum2
|
# temp1 = (temp1*20021+1)&0xFFFF
|
||||||
curByte = ord(src[i])
|
# byteXorVal ^= temp1 ^ sum2
|
||||||
if not decryption:
|
# curByte = ord(src[i])
|
||||||
keyXorVal = curByte * 257;
|
# if not decryption:
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
# keyXorVal = curByte * 257;
|
||||||
if decryption:
|
# curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
keyXorVal = curByte * 257;
|
# if decryption:
|
||||||
for j in xrange(8):
|
# keyXorVal = curByte * 257;
|
||||||
wkey[j] ^= keyXorVal;
|
# for j in xrange(8):
|
||||||
dst+=chr(curByte)
|
# wkey[j] ^= keyXorVal;
|
||||||
return dst
|
# dst+=chr(curByte)
|
||||||
|
# return dst
|
||||||
|
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
@@ -162,6 +169,9 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
self.mobi_data = ''
|
self.mobi_data = ''
|
||||||
@@ -192,14 +202,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if (self.mobi_version < 7) and (self.compression != 17480):
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -229,8 +240,13 @@ class MobiBook:
|
|||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -241,7 +257,10 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = ''
|
rec209 = ''
|
||||||
@@ -306,16 +325,29 @@ class MobiBook:
|
|||||||
def getMobiFile(self, outpath):
|
def getMobiFile(self, outpath):
|
||||||
file(outpath,'wb').write(self.mobi_data)
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getMobiVersion(self):
|
||||||
|
return self.mobi_version
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
self.mobi_data = self.data_file
|
self.mobi_data = self.data_file
|
||||||
return
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -343,7 +375,7 @@ class MobiBook:
|
|||||||
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
||||||
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
||||||
if not found_key:
|
if not found_key:
|
||||||
raise DrmException("No key found. Most likely the correct PID has not been given.")
|
raise DrmException("No key found. Please report this failure for help.")
|
||||||
# kill the drm keys
|
# kill the drm keys
|
||||||
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
||||||
# kill the drm pointers
|
# kill the drm pointers
|
||||||
@@ -359,18 +391,23 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
self.mobi_data = self.data_file[:self.sections[1][0]]
|
mobidataList = []
|
||||||
|
mobidataList.append(self.data_file[:self.sections[1][0]])
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
self.mobi_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
mobidataList.append(decoded_data)
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
self.mobi_data += data[-extra_size:]
|
mobidataList.append(data[-extra_size:])
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
|
mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
|
||||||
|
self.mobi_data = "".join(mobidataList)
|
||||||
print "done"
|
print "done"
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -391,9 +428,9 @@ def getUnencryptedBookWithList(infile,pidlist):
|
|||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<3 or len(argv)>4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
|
|||||||
@@ -49,11 +49,13 @@
|
|||||||
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
# included in the encryption were wrong. They aren't for DOC compressed
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
# files, but they are for HUFF/CDIC compress files!
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
|
||||||
__version__ = '0.30'
|
__version__ = '0.32'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -162,6 +164,9 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
self.mobi_data = ''
|
self.mobi_data = ''
|
||||||
@@ -192,14 +197,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if (self.mobi_version < 7) and (self.compression != 17480):
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -229,8 +235,13 @@ class MobiBook:
|
|||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -241,7 +252,10 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = ''
|
rec209 = ''
|
||||||
@@ -306,16 +320,26 @@ class MobiBook:
|
|||||||
def getMobiFile(self, outpath):
|
def getMobiFile(self, outpath):
|
||||||
file(outpath,'wb').write(self.mobi_data)
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
self.mobi_data = self.data_file
|
self.mobi_data = self.data_file
|
||||||
return
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -366,7 +390,10 @@ class MobiBook:
|
|||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
self.mobi_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
self.mobi_data += decoded_data
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
self.mobi_data += data[-extra_size:]
|
self.mobi_data += data[-extra_size:]
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
@@ -391,9 +418,9 @@ def getUnencryptedBookWithList(infile,pidlist):
|
|||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<3 or len(argv)>4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
@@ -87,4 +87,3 @@ def load_libcrypto():
|
|||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,68 @@
|
|||||||
|
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
|
||||||
|
# for details. Basically, it derives a key from a password and salt.
|
||||||
|
|
||||||
|
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
|
||||||
|
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
|
||||||
|
# This code may be freely used and modified for any purpose.
|
||||||
|
|
||||||
|
# Revision history
|
||||||
|
# v0.1 October 2004 - Initial release
|
||||||
|
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
|
||||||
|
# v0.3 "" the correct digest_size rather than always 20
|
||||||
|
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
try:
|
||||||
|
# only in python 2.5
|
||||||
|
import hashlib
|
||||||
|
sha = hashlib.sha1
|
||||||
|
md5 = hashlib.md5
|
||||||
|
sha256 = hashlib.sha256
|
||||||
|
except ImportError: # pragma: NO COVERAGE
|
||||||
|
# fallback
|
||||||
|
import sha
|
||||||
|
import md5
|
||||||
|
|
||||||
|
# this is what you want to call.
|
||||||
|
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
|
||||||
|
try:
|
||||||
|
# depending whether the hashfn is from hashlib or sha/md5
|
||||||
|
digest_size = hashfn().digest_size
|
||||||
|
except TypeError: # pragma: NO COVERAGE
|
||||||
|
digest_size = hashfn.digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
|
||||||
|
h = hmac.new( password, None, hashfn )
|
||||||
|
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, itercount, i )
|
||||||
|
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise ValueError("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
# Helper as per the spec. h is a hmac which has been created seeded with the
|
||||||
|
# password, it will be copy()ed and not modified.
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
|
||||||
|
return T
|
||||||
@@ -6,6 +6,7 @@ import csv
|
|||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import getopt
|
import getopt
|
||||||
|
import re
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
@@ -81,6 +82,21 @@ class DocParser(object):
|
|||||||
pos = foundpos + 1
|
pos = foundpos + 1
|
||||||
return startpos
|
return startpos
|
||||||
|
|
||||||
|
# returns a vector of integers for the tagpath
|
||||||
|
def getData(self, tagpath, pos, end, clean=False):
|
||||||
|
if clean:
|
||||||
|
digits_only = re.compile(r'''([0-9]+)''')
|
||||||
|
argres=[]
|
||||||
|
(foundat, argt) = self.findinDoc(tagpath, pos, end)
|
||||||
|
if (argt != None) and (len(argt) > 0) :
|
||||||
|
argList = argt.split('|')
|
||||||
|
for strval in argList:
|
||||||
|
if clean:
|
||||||
|
m = re.search(digits_only, strval)
|
||||||
|
if m != None:
|
||||||
|
strval = m.group()
|
||||||
|
argres.append(int(strval))
|
||||||
|
return argres
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
@@ -237,7 +253,11 @@ def convert2CSS(flatxml, fontsize, ph, pw):
|
|||||||
|
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, fontsize, ph, pw)
|
dp = DocParser(flatxml, fontsize, ph, pw)
|
||||||
|
|
||||||
csspage = dp.process()
|
csspage = dp.process()
|
||||||
|
|
||||||
return csspage
|
return csspage
|
||||||
|
|
||||||
|
|
||||||
|
def getpageIDMap(flatxml):
|
||||||
|
dp = DocParser(flatxml, 0, 0, 0)
|
||||||
|
pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
|
||||||
|
return pageidnumbers
|
||||||
|
|||||||
@@ -146,4 +146,3 @@ class Process(object):
|
|||||||
self.__quit = True
|
self.__quit = True
|
||||||
self.__inputsem.release()
|
self.__inputsem.release()
|
||||||
self.__lock.release()
|
self.__lock.release()
|
||||||
|
|
||||||
|
|||||||
@@ -16,10 +16,13 @@ if 'calibre' in sys.modules:
|
|||||||
else:
|
else:
|
||||||
inCalibre = False
|
inCalibre = False
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import zlib, zipfile, tempfile, shutil
|
import zlib, zipfile, tempfile, shutil
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
from alfcrypto import Topaz_Cipher
|
||||||
|
|
||||||
class TpzDRMError(Exception):
|
class TpzDRMError(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,25 +84,28 @@ def bookReadString(fo):
|
|||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
# Context initialisation for the Topaz Crypto
|
||||||
def topazCryptoInit(key):
|
def topazCryptoInit(key):
|
||||||
ctx1 = 0x0CAFFE19E
|
return Topaz_Cipher().ctx_init(key)
|
||||||
for keyChar in key:
|
|
||||||
keyByte = ord(keyChar)
|
# ctx1 = 0x0CAFFE19E
|
||||||
ctx2 = ctx1
|
# for keyChar in key:
|
||||||
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
# keyByte = ord(keyChar)
|
||||||
return [ctx1,ctx2]
|
# ctx2 = ctx1
|
||||||
|
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
# return [ctx1,ctx2]
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
def topazCryptoDecrypt(data, ctx):
|
def topazCryptoDecrypt(data, ctx):
|
||||||
ctx1 = ctx[0]
|
return Topaz_Cipher().decrypt(data, ctx)
|
||||||
ctx2 = ctx[1]
|
# ctx1 = ctx[0]
|
||||||
plainText = ""
|
# ctx2 = ctx[1]
|
||||||
for dataChar in data:
|
# plainText = ""
|
||||||
dataByte = ord(dataChar)
|
# for dataChar in data:
|
||||||
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
# dataByte = ord(dataChar)
|
||||||
ctx2 = ctx1
|
# m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
# ctx2 = ctx1
|
||||||
plainText += chr(m)
|
# ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
return plainText
|
# plainText += chr(m)
|
||||||
|
# return plainText
|
||||||
|
|
||||||
# Decrypt data with the PID
|
# Decrypt data with the PID
|
||||||
def decryptRecord(data,PID):
|
def decryptRecord(data,PID):
|
||||||
@@ -140,6 +146,7 @@ class TopazBook:
|
|||||||
def __init__(self, filename):
|
def __init__(self, filename):
|
||||||
self.fo = file(filename, 'rb')
|
self.fo = file(filename, 'rb')
|
||||||
self.outdir = tempfile.mkdtemp()
|
self.outdir = tempfile.mkdtemp()
|
||||||
|
# self.outdir = 'rawdat'
|
||||||
self.bookPayloadOffset = 0
|
self.bookPayloadOffset = 0
|
||||||
self.bookHeaderRecords = {}
|
self.bookHeaderRecords = {}
|
||||||
self.bookMetadata = {}
|
self.bookMetadata = {}
|
||||||
@@ -380,6 +387,7 @@ def usage(progname):
|
|||||||
|
|
||||||
# Main
|
# Main
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
|
global buildXML
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
k4 = False
|
k4 = False
|
||||||
pids = []
|
pids = []
|
||||||
@@ -438,9 +446,10 @@ def main(argv=sys.argv):
|
|||||||
tb.getHTMLZip(zipname)
|
tb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
||||||
tb.getSVGZip(zipname)
|
tb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
||||||
tb.getXMLZip(zipname)
|
tb.getXMLZip(zipname)
|
||||||
@@ -450,12 +459,12 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
except TpzDRMError, e:
|
except TpzDRMError, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup()
|
# tb.cleanup()
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup
|
# tb.cleanup
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
@@ -464,4 +473,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -152,5 +152,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == '__main__' :
|
if __name__ == '__main__' :
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
49
DeDRM_Macintosh_Application/ReadMe_DeDRM.app.rtf
Normal file
49
DeDRM_Macintosh_Application/ReadMe_DeDRM.app.rtf
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
|
||||||
|
{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
|
||||||
|
{\colortbl;\red255\green255\blue255;}
|
||||||
|
\paperw11900\paperh16840\margl1440\margr1440\vieww10320\viewh9840\viewkind0
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
|
||||||
|
|
||||||
|
\f0\b\fs24 \cf0 ReadMe_DeDRM_X.X
|
||||||
|
\b0 \
|
||||||
|
\
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\qj\pardirnatural
|
||||||
|
\cf0 DeDRM_X.X is an AppleScript droplet that allows users to drag and drop ebooks or folders of ebooks onto the DeDRM droplet to have the DRM removed. It repackages the all the "tools" DeDRM python software in one easy to use program that remembers preferences and settings.\
|
||||||
|
\
|
||||||
|
It should work without manual configuration with Kindle for Mac ebooks and Adobe Adept epub and pdf ebooks.\
|
||||||
|
\
|
||||||
|
To remove the DRM from standalone Kindle ebooks, eReader pdb ebooks, Barnes and Noble epubs, and Mobipocket ebooks requires the user to double-click the DeDRM droplet and set some additional Preferences including:\
|
||||||
|
\
|
||||||
|
Mobipocket, Kindle for iPhone/iPad/iPodTouch: 10 digit PID\
|
||||||
|
Kindle (not Kindle Fire): 16 digit Serial Number\
|
||||||
|
Barnes & Noble key files (bnepubkey.b64)\
|
||||||
|
eReader Social DRM: (Name:Last 8 digits of CC number)\
|
||||||
|
Additional Above Adept key files (.der)\
|
||||||
|
Location for DRM-free ebooks.\
|
||||||
|
\
|
||||||
|
Once these preferences have been set, the user can simply drag and drop ebooks onto the DeDRM droplet to remove the DRM.\
|
||||||
|
\
|
||||||
|
This program requires Mac OS X 10.5, 10.5 or 10.7 (Leopard, Snow Leopard or Lion)\
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
|
||||||
|
\cf0 \
|
||||||
|
\
|
||||||
|
\
|
||||||
|
|
||||||
|
\b Installation\
|
||||||
|
|
||||||
|
\b0 \
|
||||||
|
1. From tools_vX.X\\DeDRM_Applications\\, double click on DeDRM_X.X.zip to extract its contents. \
|
||||||
|
\
|
||||||
|
2. Move the resulting DeDRM X.X.app AppleScript droplet to wherever you keep you other applications. (Typically your Applications folder.)\
|
||||||
|
\
|
||||||
|
3. Optionally drag it into your dock, to make it easily available.\
|
||||||
|
\
|
||||||
|
\
|
||||||
|
\
|
||||||
|
|
||||||
|
\b Use\
|
||||||
|
|
||||||
|
\b0 \
|
||||||
|
1. To set the preferences simply double-click the Applescript droplet in your Applications folder or click on its icon in your dock, and follow the instructions in the dialogs.\
|
||||||
|
\
|
||||||
|
2. Drag & Drop DRMed ebooks or folders containing DRMed ebooks onto the Application, either in your Applications folder, or the icon in your dock.}
|
||||||
@@ -4,6 +4,8 @@
|
|||||||
import sys
|
import sys
|
||||||
import os, os.path
|
import os, os.path
|
||||||
sys.path.append(sys.path[0]+os.sep+'lib')
|
sys.path.append(sys.path[0]+os.sep+'lib')
|
||||||
|
os.environ['PYTHONIOENCODING'] = "utf-8"
|
||||||
|
|
||||||
import shutil
|
import shutil
|
||||||
import Tkinter
|
import Tkinter
|
||||||
from Tkinter import *
|
from Tkinter import *
|
||||||
@@ -18,6 +20,9 @@ from subasyncio import Process
|
|||||||
import re
|
import re
|
||||||
import simpleprefs
|
import simpleprefs
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '5.0'
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -107,7 +112,6 @@ class PrefsDialog(Toplevel):
|
|||||||
keyfile = os.path.join(prefdir,'adeptkey.der')
|
keyfile = os.path.join(prefdir,'adeptkey.der')
|
||||||
if os.path.isfile(keyfile):
|
if os.path.isfile(keyfile):
|
||||||
path = keyfile
|
path = keyfile
|
||||||
path = path.encode('utf-8')
|
|
||||||
self.adkpath.insert(0, path)
|
self.adkpath.insert(0, path)
|
||||||
button = Tkinter.Button(body, text="...", command=self.get_adkpath)
|
button = Tkinter.Button(body, text="...", command=self.get_adkpath)
|
||||||
button.grid(row=0, column=2)
|
button.grid(row=0, column=2)
|
||||||
@@ -119,7 +123,6 @@ class PrefsDialog(Toplevel):
|
|||||||
keyfile = os.path.join(prefdir,'bnepubkey.b64')
|
keyfile = os.path.join(prefdir,'bnepubkey.b64')
|
||||||
if os.path.isfile(keyfile):
|
if os.path.isfile(keyfile):
|
||||||
path = keyfile
|
path = keyfile
|
||||||
path = path.encode('utf-8')
|
|
||||||
self.bnkpath.insert(0, path)
|
self.bnkpath.insert(0, path)
|
||||||
button = Tkinter.Button(body, text="...", command=self.get_bnkpath)
|
button = Tkinter.Button(body, text="...", command=self.get_bnkpath)
|
||||||
button.grid(row=1, column=2)
|
button.grid(row=1, column=2)
|
||||||
@@ -135,7 +138,6 @@ class PrefsDialog(Toplevel):
|
|||||||
path = infofile
|
path = infofile
|
||||||
elif os.path.isfile(ainfofile):
|
elif os.path.isfile(ainfofile):
|
||||||
path = ainfofile
|
path = ainfofile
|
||||||
path = path.encode('utf-8')
|
|
||||||
self.altinfopath.insert(0, path)
|
self.altinfopath.insert(0, path)
|
||||||
button = Tkinter.Button(body, text="...", command=self.get_altinfopath)
|
button = Tkinter.Button(body, text="...", command=self.get_altinfopath)
|
||||||
button.grid(row=2, column=2)
|
button.grid(row=2, column=2)
|
||||||
@@ -166,7 +168,6 @@ class PrefsDialog(Toplevel):
|
|||||||
self.outpath.grid(row=6, column=1, sticky=sticky)
|
self.outpath.grid(row=6, column=1, sticky=sticky)
|
||||||
if 'outdir' in self.prefs_array:
|
if 'outdir' in self.prefs_array:
|
||||||
dpath = self.prefs_array['outdir']
|
dpath = self.prefs_array['outdir']
|
||||||
dpath = dpath.encode('utf-8')
|
|
||||||
self.outpath.insert(0, dpath)
|
self.outpath.insert(0, dpath)
|
||||||
button = Tkinter.Button(body, text="...", command=self.get_outpath)
|
button = Tkinter.Button(body, text="...", command=self.get_outpath)
|
||||||
button.grid(row=6, column=2)
|
button.grid(row=6, column=2)
|
||||||
@@ -263,6 +264,8 @@ class PrefsDialog(Toplevel):
|
|||||||
filetypes=[('ePub Files','.epub'),
|
filetypes=[('ePub Files','.epub'),
|
||||||
('Kindle','.azw'),
|
('Kindle','.azw'),
|
||||||
('Kindle','.azw1'),
|
('Kindle','.azw1'),
|
||||||
|
('Kindle','.azw3'),
|
||||||
|
('Kindle','.azw4'),
|
||||||
('Kindle','.tpz'),
|
('Kindle','.tpz'),
|
||||||
('Kindle','.mobi'),
|
('Kindle','.mobi'),
|
||||||
('Kindle','.prc'),
|
('Kindle','.prc'),
|
||||||
@@ -413,7 +416,6 @@ class ConvDialog(Toplevel):
|
|||||||
# post output from subprocess in scrolled text widget
|
# post output from subprocess in scrolled text widget
|
||||||
def showCmdOutput(self, msg):
|
def showCmdOutput(self, msg):
|
||||||
if msg and msg !='':
|
if msg and msg !='':
|
||||||
msg = msg.encode('utf-8')
|
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
msg = msg.replace('\r\n','\n')
|
msg = msg.replace('\r\n','\n')
|
||||||
self.stext.insert(Tkconstants.END,msg)
|
self.stext.insert(Tkconstants.END,msg)
|
||||||
@@ -465,7 +467,7 @@ class ConvDialog(Toplevel):
|
|||||||
if ext == '.pdb':
|
if ext == '.pdb':
|
||||||
self.p2 = processPDB(apphome, infile, outdir, rscpath)
|
self.p2 = processPDB(apphome, infile, outdir, rscpath)
|
||||||
return 0
|
return 0
|
||||||
if ext in ['.azw', '.azw1', '.prc', '.mobi', '.tpz']:
|
if ext in ['.azw', '.azw1', '.azw3', '.azw4', '.prc', '.mobi', '.tpz']:
|
||||||
self.p2 = processK4MOBI(apphome, infile, outdir, rscpath)
|
self.p2 = processK4MOBI(apphome, infile, outdir, rscpath)
|
||||||
return 0
|
return 0
|
||||||
if ext == '.pdf':
|
if ext == '.pdf':
|
||||||
@@ -476,13 +478,17 @@ class ConvDialog(Toplevel):
|
|||||||
|
|
||||||
# run as a subprocess via pipes and collect stdout, stderr, and return value
|
# run as a subprocess via pipes and collect stdout, stderr, and return value
|
||||||
def runit(apphome, ncmd, nparms):
|
def runit(apphome, ncmd, nparms):
|
||||||
cmdline = 'python ' + '"' + os.path.join(apphome, ncmd) + '" '
|
pengine = sys.executable
|
||||||
if sys.platform.startswith('win'):
|
if pengine is None or pengine == '':
|
||||||
search_path = os.environ['PATH']
|
pengine = 'python'
|
||||||
search_path = search_path.lower()
|
pengine = os.path.normpath(pengine)
|
||||||
if search_path.find('python') < 0:
|
cmdline = pengine + ' "' + os.path.join(apphome, ncmd) + '" '
|
||||||
# if no python hope that win registry finds what is associated with py extension
|
# if sys.platform.startswith('win'):
|
||||||
cmdline = '"' + os.path.join(apphome, ncmd) + '" '
|
# search_path = os.environ['PATH']
|
||||||
|
# search_path = search_path.lower()
|
||||||
|
# if search_path.find('python') < 0:
|
||||||
|
# # if no python hope that win registry finds what is associated with py extension
|
||||||
|
# cmdline = pengine + ' "' + os.path.join(apphome, ncmd) + '" '
|
||||||
cmdline += nparms
|
cmdline += nparms
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
p2 = subasyncio.Process(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
p2 = subasyncio.Process(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
@@ -560,6 +566,7 @@ def main(argv=sys.argv):
|
|||||||
infilelst = argv[1:]
|
infilelst = argv[1:]
|
||||||
filenames = []
|
filenames = []
|
||||||
for infile in infilelst:
|
for infile in infilelst:
|
||||||
|
print infile
|
||||||
infile = infile.replace('"','')
|
infile = infile.replace('"','')
|
||||||
infile = os.path.abspath(infile)
|
infile = os.path.abspath(infile)
|
||||||
if os.path.isdir(infile):
|
if os.path.isdir(infile):
|
||||||
@@ -584,4 +591,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
568
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/aescbc.py
Normal file
568
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/aescbc.py
Normal file
@@ -0,0 +1,568 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for doing AES CBC in one file
|
||||||
|
|
||||||
|
Modified by some_updates to extract
|
||||||
|
and combine only those parts needed for AES CBC
|
||||||
|
into one simple to add python file
|
||||||
|
|
||||||
|
Original Version
|
||||||
|
Copyright (c) 2002 by Paul A. Lambert
|
||||||
|
Under:
|
||||||
|
CryptoPy Artisitic License Version 1.0
|
||||||
|
See the wonderful pure python package cryptopy-1.2.5
|
||||||
|
and read its LICENSE.txt for complete license details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CryptoError(Exception):
|
||||||
|
""" Base class for crypto exceptions """
|
||||||
|
def __init__(self,errorMessage='Error!'):
|
||||||
|
self.message = errorMessage
|
||||||
|
def __str__(self):
|
||||||
|
return self.message
|
||||||
|
|
||||||
|
class InitCryptoError(CryptoError):
|
||||||
|
""" Crypto errors during algorithm initialization """
|
||||||
|
class BadKeySizeError(InitCryptoError):
|
||||||
|
""" Bad key size error """
|
||||||
|
class EncryptError(CryptoError):
|
||||||
|
""" Error in encryption processing """
|
||||||
|
class DecryptError(CryptoError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
class DecryptNotBlockAlignedError(DecryptError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
|
||||||
|
def xorS(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
assert len(a)==len(b)
|
||||||
|
x = []
|
||||||
|
for i in range(len(a)):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
def xor(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
x = []
|
||||||
|
for i in range(min(len(a),len(b))):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Base 'BlockCipher' and Pad classes for cipher instances.
|
||||||
|
BlockCipher supports automatic padding and type conversion. The BlockCipher
|
||||||
|
class was written to make the actual algorithm code more readable and
|
||||||
|
not for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class BlockCipher:
|
||||||
|
""" Block ciphers """
|
||||||
|
def __init__(self):
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.resetEncrypt()
|
||||||
|
self.resetDecrypt()
|
||||||
|
def resetEncrypt(self):
|
||||||
|
self.encryptBlockCount = 0
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
def resetDecrypt(self):
|
||||||
|
self.decryptBlockCount = 0
|
||||||
|
self.bytesToDecrypt = ''
|
||||||
|
|
||||||
|
def encrypt(self, plainText, more = None):
|
||||||
|
""" Encrypt a string and return a binary string """
|
||||||
|
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
|
||||||
|
cipherText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # no more data expected from caller
|
||||||
|
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
|
||||||
|
if len(finalBytes) > 0:
|
||||||
|
ctBlock = self.encryptBlock(finalBytes)
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
self.resetEncrypt()
|
||||||
|
return cipherText
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, more = None):
|
||||||
|
""" Decrypt a string and return a string """
|
||||||
|
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
|
||||||
|
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
|
||||||
|
if more == None: # no more calls to decrypt, should have all the data
|
||||||
|
if numExtraBytes != 0:
|
||||||
|
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
|
||||||
|
|
||||||
|
# hold back some bytes in case last decrypt has zero len
|
||||||
|
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
|
||||||
|
numBlocks -= 1
|
||||||
|
numExtraBytes = self.blockSize
|
||||||
|
|
||||||
|
plainText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
|
||||||
|
self.decryptBlockCount += 1
|
||||||
|
plainText += ptBlock
|
||||||
|
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # last decrypt remove padding
|
||||||
|
plainText = self.padding.removePad(plainText, self.blockSize)
|
||||||
|
self.resetDecrypt()
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
|
||||||
|
class Pad:
|
||||||
|
def __init__(self):
|
||||||
|
pass # eventually could put in calculation of min and max size extension
|
||||||
|
|
||||||
|
class padWithPadLen(Pad):
|
||||||
|
""" Pad a binary string with the length of the padding """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add padding to a binary string to make it an even multiple
|
||||||
|
of the block size """
|
||||||
|
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
|
||||||
|
padLength = blockSize - numExtraBytes
|
||||||
|
return extraBytes + padLength*chr(padLength)
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove padding from a binary string """
|
||||||
|
if not(0<len(paddedBinaryString)):
|
||||||
|
raise DecryptNotBlockAlignedError, 'Expected More Data'
|
||||||
|
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
|
||||||
|
|
||||||
|
class noPadding(Pad):
|
||||||
|
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add no padding """
|
||||||
|
return extraBytes
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove no padding """
|
||||||
|
return paddedBinaryString
|
||||||
|
|
||||||
|
"""
|
||||||
|
Rijndael encryption algorithm
|
||||||
|
This byte oriented implementation is intended to closely
|
||||||
|
match FIPS specification for readability. It is not implemented
|
||||||
|
for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Rijndael(BlockCipher):
|
||||||
|
""" Rijndael encryption algorithm """
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
|
||||||
|
self.name = 'RIJNDAEL'
|
||||||
|
self.keySize = keySize
|
||||||
|
self.strength = keySize*8
|
||||||
|
self.blockSize = blockSize # blockSize is in bytes
|
||||||
|
self.padding = padding # change default to noPadding() to get normal ECB behavior
|
||||||
|
|
||||||
|
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes'
|
||||||
|
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes'
|
||||||
|
|
||||||
|
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
|
||||||
|
self.Nk = keySize/4 # Nk is the key length in 32-bit words
|
||||||
|
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
|
||||||
|
# the block (Nb) and key (Nk) sizes.
|
||||||
|
if key != None:
|
||||||
|
self.setKey(key)
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
""" Set a key and generate the expanded key """
|
||||||
|
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
|
||||||
|
self.__expandedKey = keyExpansion(self, key)
|
||||||
|
self.reset() # BlockCipher.reset()
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
|
||||||
|
self.state = self._toBlock(plainTextBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
MixColumns(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" decrypt a block (array of bytes) """
|
||||||
|
self.state = self._toBlock(encryptedBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
for round in range(self.Nr-1,0,-1):
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
InvMixColumns(self)
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
def _toBlock(self, bs):
|
||||||
|
""" Convert binary string to array of bytes, state[col][row]"""
|
||||||
|
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
|
||||||
|
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
|
||||||
|
|
||||||
|
def _toBString(self, block):
|
||||||
|
""" Convert block (array of bytes) to binary string """
|
||||||
|
l = []
|
||||||
|
for col in block:
|
||||||
|
for rowElement in col:
|
||||||
|
l.append(chr(rowElement))
|
||||||
|
return ''.join(l)
|
||||||
|
#-------------------------------------
|
||||||
|
""" Number of rounds Nr = NrTable[Nb][Nk]
|
||||||
|
|
||||||
|
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
|
||||||
|
------------------------------------- """
|
||||||
|
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
5: {4:11, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
6: {4:12, 5:12, 6:12, 7:13, 8:14},
|
||||||
|
7: {4:13, 5:13, 6:13, 7:13, 8:14},
|
||||||
|
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
|
||||||
|
#-------------------------------------
|
||||||
|
def keyExpansion(algInstance, keyString):
|
||||||
|
""" Expand a string of size keySize into a larger array """
|
||||||
|
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
|
||||||
|
key = [ord(byte) for byte in keyString] # convert string to list
|
||||||
|
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
|
||||||
|
for i in range(Nk,Nb*(Nr+1)):
|
||||||
|
temp = w[i-1] # a four byte column
|
||||||
|
if (i%Nk) == 0 :
|
||||||
|
temp = temp[1:]+[temp[0]] # RotWord(temp)
|
||||||
|
temp = [ Sbox[byte] for byte in temp ]
|
||||||
|
temp[0] ^= Rcon[i/Nk]
|
||||||
|
elif Nk > 6 and i%Nk == 4 :
|
||||||
|
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
|
||||||
|
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
|
||||||
|
return w
|
||||||
|
|
||||||
|
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
|
||||||
|
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
|
||||||
|
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def AddRoundKey(algInstance, keyBlock):
|
||||||
|
""" XOR the algorithm state with a block of key material """
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] ^= keyBlock[column][row]
|
||||||
|
#-------------------------------------
|
||||||
|
|
||||||
|
def SubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
def InvSubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
|
||||||
|
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
|
||||||
|
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
|
||||||
|
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
|
||||||
|
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
|
||||||
|
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
|
||||||
|
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
|
||||||
|
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
|
||||||
|
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
|
||||||
|
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
|
||||||
|
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
|
||||||
|
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
|
||||||
|
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
|
||||||
|
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
|
||||||
|
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
|
||||||
|
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
|
||||||
|
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
|
||||||
|
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
|
||||||
|
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
|
||||||
|
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
|
||||||
|
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
|
||||||
|
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
|
||||||
|
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
|
||||||
|
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
|
||||||
|
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
|
||||||
|
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
|
||||||
|
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
|
||||||
|
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
|
||||||
|
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
|
||||||
|
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
|
||||||
|
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
|
||||||
|
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
|
||||||
|
|
||||||
|
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
|
||||||
|
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
|
||||||
|
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
|
||||||
|
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
|
||||||
|
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
|
||||||
|
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
|
||||||
|
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
|
||||||
|
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
|
||||||
|
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
|
||||||
|
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
|
||||||
|
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
|
||||||
|
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
|
||||||
|
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
|
||||||
|
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
|
||||||
|
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
|
||||||
|
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
|
||||||
|
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
|
||||||
|
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
|
||||||
|
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
|
||||||
|
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
|
||||||
|
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
|
||||||
|
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
|
||||||
|
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
|
||||||
|
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
|
||||||
|
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
|
||||||
|
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
|
||||||
|
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
|
||||||
|
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
|
||||||
|
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
|
||||||
|
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
|
||||||
|
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
|
||||||
|
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
""" For each block size (Nb), the ShiftRow operation shifts row i
|
||||||
|
by the amount Ci. Note that row 0 is not shifted.
|
||||||
|
Nb C1 C2 C3
|
||||||
|
------------------- """
|
||||||
|
shiftOffset = { 4 : ( 0, 1, 2, 3),
|
||||||
|
5 : ( 0, 1, 2, 3),
|
||||||
|
6 : ( 0, 1, 2, 3),
|
||||||
|
7 : ( 0, 1, 2, 4),
|
||||||
|
8 : ( 0, 1, 3, 4) }
|
||||||
|
def ShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
def InvShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
#-------------------------------------
|
||||||
|
def MixColumns(a):
    """Mix each state column linearly over GF(2^8) with coefficients (2,3,1,1)."""
    coeffs = ((2, 3, 1, 1),
              (1, 2, 3, 1),
              (1, 1, 2, 3),
              (3, 1, 1, 2))
    for col in range(a.Nb):  # for each column
        mixed = [mul(coeffs[row][0], a.state[col][0]) ^
                 mul(coeffs[row][1], a.state[col][1]) ^
                 mul(coeffs[row][2], a.state[col][2]) ^
                 mul(coeffs[row][3], a.state[col][3])
                 for row in range(4)]
        for row in range(4):
            a.state[col][row] = mixed[row]
|
||||||
|
|
||||||
|
def InvMixColumns(a):
    """Mix each state column linearly over GF(2^8); the inverse of MixColumns."""
    coeffs = ((0x0E, 0x0B, 0x0D, 0x09),
              (0x09, 0x0E, 0x0B, 0x0D),
              (0x0D, 0x09, 0x0E, 0x0B),
              (0x0B, 0x0D, 0x09, 0x0E))
    for col in range(a.Nb):  # for each column
        mixed = [mul(coeffs[row][0], a.state[col][0]) ^
                 mul(coeffs[row][1], a.state[col][1]) ^
                 mul(coeffs[row][2], a.state[col][2]) ^
                 mul(coeffs[row][3], a.state[col][3])
                 for row in range(4)]
        for row in range(4):
            a.state[col][row] = mixed[row]
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def mul(a, b):
    """Multiply two elements of GF(2^8) using the log/antilog tables.

    Needed by MixColumns and InvMixColumns; zero times anything is zero.
    """
    if a == 0 or b == 0:
        return 0
    return Alogtable[(Logtable[a] + Logtable[b]) % 255]
|
||||||
|
|
||||||
|
# Discrete-log table for GF(2^8) with generator 3 (Logtable[Alogtable[i]] == i).
Logtable = (
    0,   0,   25,  1,   50,  2,   26,  198, 75,  199, 27,  104, 51,  238, 223, 3,
    100, 4,   224, 14,  52,  141, 129, 239, 76,  113, 8,   200, 248, 105, 28,  193,
    125, 194, 29,  181, 249, 185, 39,  106, 77,  228, 166, 114, 154, 201, 9,   120,
    101, 47,  138, 5,   33,  15,  225, 36,  18,  240, 130, 69,  53,  147, 218, 142,
    150, 143, 219, 189, 54,  208, 206, 148, 19,  92,  210, 241, 64,  70,  131, 56,
    102, 221, 253, 48,  191, 6,   139, 98,  179, 37,  226, 152, 34,  136, 145, 16,
    126, 110, 72,  195, 163, 182, 30,  66,  58,  107, 40,  84,  250, 133, 61,  186,
    43,  121, 10,  21,  155, 159, 94,  202, 78,  212, 172, 229, 243, 115, 167, 87,
    175, 88,  168, 80,  244, 234, 214, 116, 79,  174, 233, 213, 231, 230, 173, 232,
    44,  215, 117, 122, 235, 22,  11,  245, 89,  203, 95,  176, 156, 169, 81,  160,
    127, 12,  246, 111, 23,  196, 73,  236, 216, 67,  31,  45,  164, 118, 123, 183,
    204, 187, 62,  90,  251, 96,  177, 134, 59,  82,  161, 108, 170, 85,  41,  157,
    151, 178, 135, 144, 97,  190, 220, 252, 188, 149, 207, 205, 55,  63,  91,  209,
    83,  57,  132, 60,  65,  162, 109, 71,  20,  42,  158, 93,  86,  242, 211, 171,
    68,  17,  146, 217, 35,  32,  46,  137, 180, 124, 184, 38,  119, 153, 227, 165,
    103, 74,  237, 222, 197, 49,  254, 24,  13,  99,  140, 128, 192, 247, 112, 7,
)
|
||||||
|
|
||||||
|
# Antilog table for GF(2^8): Alogtable[i] == 3**i in the field.
Alogtable = (
    1,   3,   5,   15,  17,  51,  85,  255, 26,  46,  114, 150, 161, 248, 19,  53,
    95,  225, 56,  72,  216, 115, 149, 164, 247, 2,   6,   10,  30,  34,  102, 170,
    229, 52,  92,  228, 55,  89,  235, 38,  106, 190, 217, 112, 144, 171, 230, 49,
    83,  245, 4,   12,  20,  60,  68,  204, 79,  209, 104, 184, 211, 110, 178, 205,
    76,  212, 103, 169, 224, 59,  77,  215, 98,  166, 241, 8,   24,  40,  120, 136,
    131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73,  219, 118, 154,
    181, 196, 87,  249, 16,  48,  80,  240, 11,  29,  39,  105, 187, 214, 97,  163,
    254, 25,  43,  125, 135, 146, 173, 236, 47,  113, 147, 174, 233, 32,  96,  160,
    251, 22,  58,  78,  210, 109, 183, 194, 93,  231, 50,  86,  250, 21,  63,  65,
    195, 94,  226, 61,  71,  201, 64,  192, 91,  237, 44,  116, 156, 191, 218, 117,
    159, 186, 213, 100, 172, 239, 42,  126, 130, 157, 188, 223, 122, 142, 137, 128,
    155, 182, 193, 88,  232, 35,  101, 175, 234, 37,  111, 177, 200, 67,  197, 84,
    252, 31,  33,  99,  165, 244, 7,   9,   27,  45,  119, 153, 176, 203, 70,  202,
    69,  207, 74,  222, 121, 139, 134, 145, 168, 227, 62,  66,  198, 81,  243, 14,
    18,  54,  90,  238, 41,  123, 141, 140, 143, 138, 133, 148, 167, 242, 13,  23,
    57,  75,  221, 124, 132, 151, 162, 253, 28,  36,  108, 180, 199, 82,  246, 1,
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES Encryption Algorithm
|
||||||
|
The AES algorithm is just Rijndael algorithm restricted to the default
|
||||||
|
blockSize of 128 bits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES(Rijndael):
    """ The AES algorithm is the Rijndael block cipher restricted to block
        sizes of 128 bits and key sizes of 128, 192 or 256 bits
    """
    def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
        """Initialize AES; keySize is in bytes (16, 24 or 32).

        Raises BadKeySizeError for any other key size.
        NOTE(review): the default padWithPadLen() instance is created once at
        class-definition time and shared by all callers that rely on the
        default -- presumably intentional, confirm Pad instances are stateless.
        """
        # FIX: replaced the Python-2-only "raise Exc, msg" statement with the
        # parenthesized call form, which behaves identically and is also
        # valid Python 3 syntax.
        if keySize not in (16, 24, 32):
            raise BadKeySizeError('Illegal AES key size, must be 16, 24, or 32 bytes')

        Rijndael.__init__(self, key, padding=padding, keySize=keySize, blockSize=16)

        self.name = 'AES'
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CBC mode of encryption for block ciphers.
|
||||||
|
This algorithm mode wraps any BlockCipher to make a
|
||||||
|
Cipher Block Chaining mode.
|
||||||
|
"""
|
||||||
|
from random import Random # should change to crypto.random!!!
|
||||||
|
|
||||||
|
|
||||||
|
class CBC(BlockCipher):
    """ Wraps any BlockCipher in Cipher Block Chaining (CBC) mode.

    The IV is generated automatically when not supplied explicitly, and
    padding is handled by the Pad instance given at construction time.
    """
    def __init__(self, blockCipherInstance, padding=padWithPadLen()):
        """Create a CBC wrapper around an existing BlockCipher instance."""
        self.baseCipher = blockCipherInstance
        self.name = self.baseCipher.name + '_CBC'
        self.blockSize = self.baseCipher.blockSize
        self.keySize = self.baseCipher.keySize
        self.padding = padding
        self.baseCipher.padding = noPadding()  # the wrapped cipher must NOT pad
        # IV generation currently relies on the mediocre stdlib PRNG -- should
        # eventually move to a cryptographic random source.
        self.r = Random()
        import time
        # Seed with the current time plus this instance's repr so separate
        # instances produce different IV streams.
        newSeed = time.ctime() + str(self.r)
        self.r.seed(newSeed)
        self.reset()

    def setKey(self, key):
        """Forward the key to the wrapped cipher."""
        self.baseCipher.setKey(key)

    # Overloads reset both the CBC chaining state and the wrapped cipher.
    def resetEncrypt(self):
        BlockCipher.resetEncrypt(self)   # reset CBC encrypt state (super class)
        self.baseCipher.resetEncrypt()   # reset base cipher encrypt state

    def resetDecrypt(self):
        BlockCipher.resetDecrypt(self)   # reset CBC decrypt state (super class)
        self.baseCipher.resetDecrypt()   # reset base cipher decrypt state

    def encrypt(self, plainText, iv=None, more=None):
        """ CBC encryption; an explicit IV may be given only on the first
        call, otherwise one is generated automatically. """
        if self.encryptBlockCount == 0:
            self.iv = iv
        else:
            assert iv is None, 'IV used only on first call to encrypt'
        return BlockCipher.encrypt(self, plainText, more=more)

    def decrypt(self, cipherText, iv=None, more=None):
        """ CBC decryption; an explicit IV may be given only on the first
        call, otherwise it is read from the ciphertext. """
        if self.decryptBlockCount == 0:
            self.iv = iv
        else:
            assert iv is None, 'IV used only on first call to decrypt'
        return BlockCipher.decrypt(self, cipherText, more=more)

    def encryptBlock(self, plainTextBlock):
        """ Encrypt one block; handles IV generation/prepending on block 0. """
        auto_IV = ''
        if self.encryptBlockCount == 0:
            if self.iv is None:
                # Generate a random IV and prepend it to the ciphertext.
                self.iv = ''.join([chr(self.r.randrange(256)) for _ in range(self.blockSize)])
                self.prior_encr_CT_block = self.iv
                auto_IV = self.prior_encr_CT_block
            else:
                # Application-provided IV is used but not prepended.
                assert len(self.iv) == self.blockSize, 'IV must be same length as block'
                self.prior_encr_CT_block = self.iv
        # Encrypt the previous ciphertext block XORed with the plaintext.
        ct = self.baseCipher.encryptBlock(xor(self.prior_encr_CT_block, plainTextBlock))
        self.prior_encr_CT_block = ct
        return auto_IV + ct

    def decryptBlock(self, encryptedBlock):
        """ Decrypt one block; block 0 may be consumed as the IV. """
        if self.decryptBlockCount == 0:   # first call, process IV
            if self.iv is None:           # auto-decrypt IV from the stream
                self.prior_CT_block = encryptedBlock
                return ''
            assert len(self.iv) == self.blockSize, "Bad IV size on CBC decryption"
            self.prior_CT_block = self.iv
        dct = self.baseCipher.decryptBlock(encryptedBlock)
        # XOR with the previous ciphertext block to recover the plaintext.
        recovered = xor(self.prior_CT_block, dct)
        self.prior_CT_block = encryptedBlock
        return recovered
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES_CBC Encryption Algorithm
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES_CBC(CBC):
    """ AES encryption operating in CBC feedback mode. """
    def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
        # The wrapped AES instance must not pad; CBC handles padding itself.
        base = AES(key, noPadding(), keySize)
        CBC.__init__(self, base, padding)
        self.name = 'AES_CBC'
|
||||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,290 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines libalfcrypto
|
||||||
|
def _load_libalfcrypto():
|
||||||
|
import ctypes
|
||||||
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
|
||||||
|
|
||||||
|
pointer_size = ctypes.sizeof(ctypes.c_voidp)
|
||||||
|
name_of_lib = None
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
name_of_lib = 'libalfcrypto.dylib'
|
||||||
|
elif sys.platform.startswith('win'):
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'alfcrypto.dll'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'alfcrypto64.dll'
|
||||||
|
else:
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'libalfcrypto32.so'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'libalfcrypto64.so'
|
||||||
|
|
||||||
|
libalfcrypto = sys.path[0] + os.sep + name_of_lib
|
||||||
|
|
||||||
|
if not os.path.isfile(libalfcrypto):
|
||||||
|
raise Exception('libalfcrypto not found')
|
||||||
|
|
||||||
|
libalfcrypto = CDLL(libalfcrypto)
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libalfcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
# aes cbc decryption
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
#
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key,
|
||||||
|
# unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pukall 1 Cipher
|
||||||
|
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
|
||||||
|
# unsigned char *dest, unsigned int len, int decryption);
|
||||||
|
|
||||||
|
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
|
||||||
|
|
||||||
|
# Topaz Encryption
|
||||||
|
# typedef struct _TpzCtx {
|
||||||
|
# unsigned int v[2];
|
||||||
|
# } TpzCtx;
|
||||||
|
#
|
||||||
|
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
|
||||||
|
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
|
||||||
|
|
||||||
|
class TPZ_CTX(Structure):
|
||||||
|
_fields_ = [('v', c_long * 2)]
|
||||||
|
|
||||||
|
TPZ_CTX_p = POINTER(TPZ_CTX)
|
||||||
|
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
|
||||||
|
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
|
||||||
|
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise Exception('AES CBC improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise Exception('Failed to initialize AES CBC key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise Exception('AES CBC decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
self.key = key
|
||||||
|
out = create_string_buffer(len(src))
|
||||||
|
de = 0
|
||||||
|
if decryption:
|
||||||
|
de = 1
|
||||||
|
rv = PC1(key, len(key), src, out, len(src), de)
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
tpz_ctx = self._ctx = TPZ_CTX()
|
||||||
|
topazCryptoInit(tpz_ctx, key, len(key))
|
||||||
|
return tpz_ctx
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
topazCryptoDecrypt(ctx, data, out, len(data))
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
print "Using Library AlfCrypto DLL/DYLIB/SO"
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
|
||||||
|
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
sum1 = 0;
|
||||||
|
sum2 = 0;
|
||||||
|
keyXorVal = 0;
|
||||||
|
if len(key)!=16:
|
||||||
|
print "Bad key length!"
|
||||||
|
return None
|
||||||
|
wkey = []
|
||||||
|
for i in xrange(8):
|
||||||
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
dst = ""
|
||||||
|
for i in xrange(len(src)):
|
||||||
|
temp1 = 0;
|
||||||
|
byteXorVal = 0;
|
||||||
|
for j in xrange(8):
|
||||||
|
temp1 ^= wkey[j]
|
||||||
|
sum2 = (sum2+j)*20021 + sum1
|
||||||
|
sum1 = (temp1*346)&0xFFFF
|
||||||
|
sum2 = (sum2+sum1)&0xFFFF
|
||||||
|
temp1 = (temp1*20021+1)&0xFFFF
|
||||||
|
byteXorVal ^= temp1 ^ sum2
|
||||||
|
curByte = ord(src[i])
|
||||||
|
if not decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
|
if decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
for j in xrange(8):
|
||||||
|
wkey[j] ^= keyXorVal;
|
||||||
|
dst+=chr(curByte)
|
||||||
|
return dst
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
self._ctx = [ctx1, ctx2]
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._key = None
|
||||||
|
self._iv = None
|
||||||
|
self.aes = None
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._key = userkey
|
||||||
|
self._iv = iv
|
||||||
|
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
iv = self._iv
|
||||||
|
cleartext = self.aes.decrypt(iv + data)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
    """Return (AES_CBC, Pukall_Cipher, Topaz_Cipher), preferring the native
    library loader and falling back to the pure-Python implementation.

    All three are None when every loader fails.
    """
    AES_CBC = Pukall_Cipher = Topaz_Cipher = None
    cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
    for loader in cryptolist:
        try:
            AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
            break
        # FIX: "except (ImportError, Exception)" was redundant -- ImportError
        # is already an Exception subclass, so a single Exception clause is
        # exactly equivalent.  Failures are intentionally best-effort here:
        # the next loader in the list is tried instead.
        except Exception:
            pass
    return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
    # PKCS5_PBKDF2_HMAC_SHA1 only exists in OpenSSL, so a pure-Python
    # PBKDF2-HMAC-SHA1 implementation (RFC 2898) is used instead.
    # PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
    #                 [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
    def pbkdf2(self, passwd, salt, iter, keylen):
        """Derive keylen bytes from passwd/salt via PBKDF2-HMAC-SHA1
        with the given iteration count."""

        def xorstr(a, b):
            # byte-wise XOR of two equal-length strings
            if len(a) != len(b):
                raise Exception("xorstr(): lengths differ")
            return ''.join((chr(ord(x) ^ ord(y)) for x, y in zip(a, b)))

        def prf(h, data):
            # HMAC pseudo-random function over a copied state
            hm = h.copy()
            hm.update(data)
            return hm.digest()

        def pbkdf2_F(h, salt, itercount, blocknum):
            # one output block: iterated HMAC values folded together with XOR
            U = prf(h, salt + pack('>i', blocknum))
            T = U
            for i in range(2, itercount + 1):
                U = prf(h, U)
                T = xorstr(T, U)
            return T

        sha = hashlib.sha1
        digest_size = sha().digest_size
        # l - number of output blocks to produce
        # FIX: use explicit floor division; identical for Python 2 ints and
        # forward-compatible with Python 3 (where / is true division).
        l = keylen // digest_size
        if keylen % digest_size != 0:
            l += 1
        h = hmac.new(passwd, None, sha)
        T = ""
        for i in range(1, l + 1):
            T += pbkdf2_F(h, salt, iter, i)
        return T[0:keylen]
|
||||||
|
|
||||||
|
|
||||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,899 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
Comprehensive Mazama Book DRM with Topaz Cryptography V2.2
|
||||||
|
|
||||||
|
-----BEGIN PUBLIC KEY-----
|
||||||
|
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdBHJ4CNc6DNFCw4MRCw4SWAK6
|
||||||
|
M8hYfnNEI0yQmn5Ti+W8biT7EatpauE/5jgQMPBmdNrDr1hbHyHBSP7xeC2qlRWC
|
||||||
|
B62UCxeu/fpfnvNHDN/wPWWH4jynZ2M6cdcnE5LQ+FfeKqZn7gnG2No1U9h7oOHx
|
||||||
|
y2/pHuYme7U1TsgSjwIDAQAB
|
||||||
|
-----END PUBLIC KEY-----
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import getopt
|
||||||
|
import zlib
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
|
string_at, Structure, c_void_p, cast
|
||||||
|
import _winreg as winreg
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkMessageBox
|
||||||
|
import traceback
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
MAX_PATH = 255
|
||||||
|
|
||||||
|
kernel32 = windll.kernel32
|
||||||
|
advapi32 = windll.advapi32
|
||||||
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
global kindleDatabase
|
||||||
|
global bookFile
|
||||||
|
global bookPayloadOffset
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookMetadata
|
||||||
|
global bookKey
|
||||||
|
global command
|
||||||
|
|
||||||
|
#
|
||||||
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
|
#
|
||||||
|
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
|
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
||||||
|
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
|
|
||||||
|
#
|
||||||
|
# Exceptions for all the problems that might happen during the script
|
||||||
|
#
|
||||||
|
|
||||||
|
class CMBDTCError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class CMBDTCFatal(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
#
|
||||||
|
# Stolen stuff
|
||||||
|
#
|
||||||
|
|
||||||
|
class DataBlob(Structure):
    """ctypes mirror of the Win32 DATA_BLOB structure used by DPAPI."""
    _fields_ = [
        ('cbData', c_uint),    # byte count of the blob
        ('pbData', c_void_p),  # pointer to the raw data
    ]
DataBlob_p = POINTER(DataBlob)
|
||||||
|
|
||||||
|
def GetSystemDirectory():
    """Build and return a wrapper around kernel32.GetSystemDirectoryW."""
    GetSystemDirectoryW = kernel32.GetSystemDirectoryW
    GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
    GetSystemDirectoryW.restype = c_uint
    def _get_system_directory():
        buf = create_unicode_buffer(MAX_PATH + 1)
        GetSystemDirectoryW(buf, len(buf))
        return buf.value
    return _get_system_directory
GetSystemDirectory = GetSystemDirectory()
|
||||||
|
|
||||||
|
|
||||||
|
def GetVolumeSerialNumber():
    """Build and return a wrapper around kernel32.GetVolumeInformationW that
    yields only the volume serial number for a given root path."""
    GetVolumeInformationW = kernel32.GetVolumeInformationW
    GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
                                      POINTER(c_uint), POINTER(c_uint),
                                      POINTER(c_uint), c_wchar_p, c_uint]
    GetVolumeInformationW.restype = c_uint
    def _get_serial(path):
        serial = c_uint(0)
        GetVolumeInformationW(path, None, 0, byref(serial), None, None, None, 0)
        return serial.value
    return _get_serial
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
|
||||||
|
def GetUserName():
    """Build and return a wrapper around advapi32.GetUserNameW that yields the
    current Windows user name narrowed to single-byte characters."""
    GetUserNameW = advapi32.GetUserNameW
    GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
    GetUserNameW.restype = c_uint
    def _get_user_name():
        buf = create_unicode_buffer(32)
        size = c_uint(len(buf))
        # grow the buffer until the call succeeds
        while not GetUserNameW(buf, byref(size)):
            buf = create_unicode_buffer(len(buf) * 2)
            size.value = len(buf)
        # drop the high bytes of the UTF-16 encoding
        return buf.value.encode('utf-16-le')[::2]
    return _get_user_name
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
|
||||||
|
def CryptUnprotectData():
    """Build and return a wrapper around crypt32.CryptUnprotectData (DPAPI)."""
    _CryptUnprotectData = crypt32.CryptUnprotectData
    _CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
                                    c_void_p, c_void_p, c_uint, DataBlob_p]
    _CryptUnprotectData.restype = c_uint
    def _unprotect(indata, entropy):
        inbuf = create_string_buffer(indata)
        inblob = DataBlob(len(indata), cast(inbuf, c_void_p))
        entbuf = create_string_buffer(entropy)
        entblob = DataBlob(len(entropy), cast(entbuf, c_void_p))
        outblob = DataBlob()
        if not _CryptUnprotectData(byref(inblob), None, byref(entblob),
                                   None, None, 0, byref(outblob)):
            raise CMBDTCFatal("Failed to Unprotect Data")
        return string_at(outblob.pbData, outblob.cbData)
    return _unprotect
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the MD5 digest of "message"
|
||||||
|
#
|
||||||
|
|
||||||
|
def MD5(message):
    """Return the raw MD5 digest of message."""
    return hashlib.md5(message).digest()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the SHA-1 digest of "message"
|
||||||
|
#
|
||||||
|
|
||||||
|
def SHA1(message):
    """Return the raw SHA-1 digest of message."""
    return hashlib.sha1(message).digest()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Open the book file at path
|
||||||
|
#
|
||||||
|
|
||||||
|
def openBook(path):
    """Open the book file at path for binary reading.

    Raises CMBDTCFatal when the file cannot be opened.
    """
    try:
        return open(path, 'rb')
    # FIX: the original bare "except:" also swallowed KeyboardInterrupt and
    # SystemExit; opening a file can only fail with an environment error here.
    except (IOError, OSError):
        raise CMBDTCFatal("Could not open book file: " + path)
|
||||||
|
#
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
#
|
||||||
|
|
||||||
|
def encode(data, map):
    """Encode each byte of data as two characters drawn from map."""
    out = ""
    n = len(map)
    for ch in data:
        b = ord(ch)
        # high symbol from the 0x80-flipped quotient, low symbol from the remainder
        out += map[(b ^ 0x80) // n] + map[b % n]
    return out
|
||||||
|
|
||||||
|
#
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodeHash(data, map):
    """MD5-hash data, then encode the 16-byte digest with map."""
    return encode(MD5(data), map)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
#
|
||||||
|
|
||||||
|
def decode(data, map):
    """Decode a two-characters-per-byte string produced by encode()."""
    out = ""
    for pos in range(0, len(data), 2):
        hi = map.find(data[pos])
        lo = map.find(data[pos + 1])
        # reverse the quotient/remainder split done by encode()
        out += chr((((hi * 0x40) ^ 0x80) & 0xFF) + lo)
    return out
|
||||||
|
|
||||||
|
#
|
||||||
|
# Locate and open the Kindle.info file (Hopefully in the way it is done in the Kindle application)
|
||||||
|
#
|
||||||
|
|
||||||
|
def openKindleInfo():
    """Locate the Kindle for PC kindle.info file via the registry and open it.

    Mimics the lookup the Kindle application itself performs.
    """
    regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
    path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
    return open(path+'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info','r')
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the Kindle.info file and return the records as a list of key-values
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseKindleInfo():
    """Parse kindle.info into a dict of encoded-key -> encrypted-value."""
    DB = {}
    reader = openKindleInfo()
    reader.read(1)  # skip the leading separator byte
    remainder = reader.read()
    for item in remainder.split('{'):
        parts = item.split(':')
        DB[parts[0]] = parts[1]
    return DB
|
||||||
|
|
||||||
|
#
|
||||||
|
# Find if the original string for a hashed/encoded string is known. If so return the original string othwise return an empty string. (Totally not optimal)
|
||||||
|
#
|
||||||
|
|
||||||
|
def findNameForHash(hash):
    """Return the known plaintext name whose encoded hash equals *hash*,
    or the empty string when the hash is not recognized.

    FIX: the original returned the loop variable ``name`` instead of
    ``result``, so an unrecognized hash wrongly returned the last candidate
    ("MazamaRandomNumber") instead of the documented empty string.
    """
    names = ["kindle.account.tokens", "kindle.cookie.item", "eulaVersionAccepted",
             "login_date", "kindle.token.item", "login", "kindle.key.item",
             "kindle.name.info", "kindle.device.info", "MazamaRandomNumber"]
    result = ""
    for name in names:
        if hash == encodeHash(name, charMap2):
            result = name
            break
    return result
|
||||||
|
|
||||||
|
#
|
||||||
|
# Print all the records from the kindle.info file (option -i)
|
||||||
|
#
|
||||||
|
|
||||||
|
def printKindleInfo():
|
||||||
|
for record in kindleDatabase:
|
||||||
|
name = findNameForHash(record)
|
||||||
|
if name != "" :
|
||||||
|
print (name)
|
||||||
|
print ("--------------------------\n")
|
||||||
|
else :
|
||||||
|
print ("Unknown Record")
|
||||||
|
print getKindleInfoValueForHash(record)
|
||||||
|
print "\n"
|
||||||
|
#
|
||||||
|
# Get a record from the Kindle.info file for the key "hashedKey" (already hashed and encoded). Return the decoded and decrypted record
|
||||||
|
#
|
||||||
|
|
||||||
|
def getKindleInfoValueForHash(hashedKey):
    """Fetch, decode and DPAPI-decrypt the kindle.info record keyed by the
    already hashed-and-encoded hashedKey."""
    global kindleDatabase
    encryptedValue = decode(kindleDatabase[hashedKey], charMap2)
    return CryptUnprotectData(encryptedValue, "")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a record from the Kindle.info file for the string in "key" (plaintext). Return the decoded and decrypted record
|
||||||
|
#
|
||||||
|
|
||||||
|
def getKindleInfoValueForKey(key):
    """Look up a kindle.info record by its plaintext key name."""
    return getKindleInfoValueForHash(encodeHash(key, charMap2))
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a 7 bit encoded number from the book file
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadEncodedNumber():
    """Read a 7-bit variable-length encoded integer from the global bookFile.

    A leading 0xFF byte marks a negative value; bytes >= 0x80 carry seven
    payload bits each with a continuation flag.
    """
    negative = False
    value = ord(bookFile.read(1))
    if value == 0xFF:
        negative = True
        value = ord(bookFile.read(1))
    if value >= 0x80:
        accum = value & 0x7F
        while value >= 0x80:
            value = ord(bookFile.read(1))
            accum = (accum << 7) + (value & 0x7F)
        value = accum
    if negative:
        value = -value
    return value
|
||||||
|
|
||||||
|
#
|
||||||
|
# Encode a number in 7 bit format
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodeNumber(number):
    """Encode an integer in the 7-bit variable-length book format.

    Negative numbers are offset by one and suffixed with a leading 0xFF
    marker byte (after the final reversal).
    """
    pieces = ""
    negative = False
    continuation = 0

    if number < 0:
        number = -number + 1
        negative = True

    while True:
        low7 = number & 0x7F
        number >>= 7
        low7 += continuation
        pieces += chr(low7)
        continuation = 0x80
        if number == 0:
            # avoid a final 0xFF byte being mistaken for the negative marker
            if low7 == 0xFF and not negative:
                pieces += chr(0x80)
            break

    if negative:
        pieces += chr(0xFF)

    return pieces[::-1]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a length prefixed string from the file
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadString():
    """Read a length-prefixed string from the global bookFile."""
    count = bookReadEncodedNumber()
    return unpack(str(count) + "s", bookFile.read(count))[0]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns a length prefixed string
|
||||||
|
#
|
||||||
|
|
||||||
|
def lengthPrefixString(data):
    """Return data prefixed with its 7-bit-encoded length."""
    return encodeNumber(len(data)) + data
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read and return the data of one header record at the current book file position [[offset,compressedLength,decompressedLength],...]
|
||||||
|
#
|
||||||
|
|
||||||
|
def bookReadHeaderRecordData():
|
||||||
|
nbValues = bookReadEncodedNumber()
|
||||||
|
values = []
|
||||||
|
for i in range (0,nbValues):
|
||||||
|
values.append([bookReadEncodedNumber(),bookReadEncodedNumber(),bookReadEncodedNumber()])
|
||||||
|
return values
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read and parse one header record at the current book file position and return the associated data [[offset,compressedLength,decompressedLength],...]
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseTopazHeaderRecord():
|
||||||
|
if ord(bookFile.read(1)) != 0x63:
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header")
|
||||||
|
|
||||||
|
tag = bookReadString()
|
||||||
|
record = bookReadHeaderRecordData()
|
||||||
|
return [tag,record]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the header of a Topaz file, get all the header records and the offset for the payload
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseTopazHeader():
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookPayloadOffset
|
||||||
|
magic = unpack("4s",bookFile.read(4))[0]
|
||||||
|
|
||||||
|
if magic != 'TPZ0':
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header, not a Topaz file")
|
||||||
|
|
||||||
|
nbRecords = bookReadEncodedNumber()
|
||||||
|
bookHeaderRecords = {}
|
||||||
|
|
||||||
|
for i in range (0,nbRecords):
|
||||||
|
result = parseTopazHeaderRecord()
|
||||||
|
bookHeaderRecords[result[0]] = result[1]
|
||||||
|
|
||||||
|
if ord(bookFile.read(1)) != 0x64 :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Header")
|
||||||
|
|
||||||
|
bookPayloadOffset = bookFile.tell()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Get a record in the book payload, given its name and index. If necessary the record is decrypted. The record is not decompressed
|
||||||
|
#
|
||||||
|
|
||||||
|
def getBookPayloadRecord(name, index):
|
||||||
|
encrypted = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
recordOffset = bookHeaderRecords[name][index][0]
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, record not found")
|
||||||
|
|
||||||
|
bookFile.seek(bookPayloadOffset + recordOffset)
|
||||||
|
|
||||||
|
tag = bookReadString()
|
||||||
|
if tag != name :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, record name doesn't match")
|
||||||
|
|
||||||
|
recordIndex = bookReadEncodedNumber()
|
||||||
|
|
||||||
|
if recordIndex < 0 :
|
||||||
|
encrypted = True
|
||||||
|
recordIndex = -recordIndex -1
|
||||||
|
|
||||||
|
if recordIndex != index :
|
||||||
|
raise CMBDTCFatal("Parse Error : Invalid Record, index doesn't match")
|
||||||
|
|
||||||
|
if bookHeaderRecords[name][index][2] != 0 :
|
||||||
|
record = bookFile.read(bookHeaderRecords[name][index][2])
|
||||||
|
else:
|
||||||
|
record = bookFile.read(bookHeaderRecords[name][index][1])
|
||||||
|
|
||||||
|
if encrypted:
|
||||||
|
ctx = topazCryptoInit(bookKey)
|
||||||
|
record = topazCryptoDecrypt(record,ctx)
|
||||||
|
|
||||||
|
return record
|
||||||
|
|
||||||
|
#
|
||||||
|
# Extract, decrypt and decompress a book record indicated by name and index and print it or save it in "filename"
|
||||||
|
#
|
||||||
|
|
||||||
|
def extractBookPayloadRecord(name, index, filename):
|
||||||
|
compressed = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
compressed = bookHeaderRecords[name][index][2] != 0
|
||||||
|
record = getBookPayloadRecord(name,index)
|
||||||
|
except:
|
||||||
|
print("Could not find record")
|
||||||
|
|
||||||
|
if compressed:
|
||||||
|
try:
|
||||||
|
record = zlib.decompress(record)
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Could not decompress record")
|
||||||
|
|
||||||
|
if filename != "":
|
||||||
|
try:
|
||||||
|
file = open(filename,"wb")
|
||||||
|
file.write(record)
|
||||||
|
file.close()
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Could not write to destination file")
|
||||||
|
else:
|
||||||
|
print(record)
|
||||||
|
|
||||||
|
#
|
||||||
|
# return next record [key,value] from the book metadata from the current book position
|
||||||
|
#
|
||||||
|
|
||||||
|
def readMetadataRecord():
|
||||||
|
return [bookReadString(),bookReadString()]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Parse the metadata record from the book payload and return a list of [key,values]
|
||||||
|
#
|
||||||
|
|
||||||
|
def parseMetadata():
|
||||||
|
global bookHeaderRecords
|
||||||
|
global bookPayloadAddress
|
||||||
|
global bookMetadata
|
||||||
|
bookMetadata = {}
|
||||||
|
bookFile.seek(bookPayloadOffset + bookHeaderRecords["metadata"][0][0])
|
||||||
|
tag = bookReadString()
|
||||||
|
if tag != "metadata" :
|
||||||
|
raise CMBDTCFatal("Parse Error : Record Names Don't Match")
|
||||||
|
|
||||||
|
flags = ord(bookFile.read(1))
|
||||||
|
nbRecords = ord(bookFile.read(1))
|
||||||
|
|
||||||
|
for i in range (0,nbRecords) :
|
||||||
|
record =readMetadataRecord()
|
||||||
|
bookMetadata[record[0]] = record[1]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns two bit at offset from a bit field
|
||||||
|
#
|
||||||
|
|
||||||
|
def getTwoBitsFromBitField(bitField,offset):
|
||||||
|
byteNumber = offset // 4
|
||||||
|
bitPosition = 6 - 2*(offset % 4)
|
||||||
|
|
||||||
|
return ord(bitField[byteNumber]) >> bitPosition & 3
|
||||||
|
|
||||||
|
#
|
||||||
|
# Returns the six bits at offset from a bit field
|
||||||
|
#
|
||||||
|
|
||||||
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
|
offset *= 3
|
||||||
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
|
return value
|
||||||
|
|
||||||
|
#
|
||||||
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
|
#
|
||||||
|
|
||||||
|
def encodePID(hash):
|
||||||
|
global charMap3
|
||||||
|
PID = ""
|
||||||
|
for position in range (0,8):
|
||||||
|
PID += charMap3[getSixBitsFromBitField(hash,position)]
|
||||||
|
return PID
|
||||||
|
|
||||||
|
#
|
||||||
|
# Context initialisation for the Topaz Crypto
|
||||||
|
#
|
||||||
|
|
||||||
|
def topazCryptoInit(key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
#
|
||||||
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
|
#
|
||||||
|
|
||||||
|
def topazCryptoDecrypt(data, ctx):
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
|
||||||
|
plainText = ""
|
||||||
|
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt a payload record with the PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptRecord(data,PID):
|
||||||
|
ctx = topazCryptoInit(PID)
|
||||||
|
return topazCryptoDecrypt(data, ctx)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Try to decrypt a dkey record (contains the book PID)
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptDkeyRecord(data,PID):
|
||||||
|
record = decryptRecord(data,PID)
|
||||||
|
fields = unpack("3sB8sB8s3s",record)
|
||||||
|
|
||||||
|
if fields[0] != "PID" or fields[5] != "pid" :
|
||||||
|
raise CMBDTCError("Didn't find PID magic numbers in record")
|
||||||
|
elif fields[1] != 8 or fields[3] != 8 :
|
||||||
|
raise CMBDTCError("Record didn't contain correct length fields")
|
||||||
|
elif fields[2] != PID :
|
||||||
|
raise CMBDTCError("Record didn't contain PID")
|
||||||
|
|
||||||
|
return fields[4]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt all the book's dkey records (contain the book PID)
|
||||||
|
#
|
||||||
|
|
||||||
|
def decryptDkeyRecords(data,PID):
|
||||||
|
nbKeyRecords = ord(data[0])
|
||||||
|
records = []
|
||||||
|
data = data[1:]
|
||||||
|
for i in range (0,nbKeyRecords):
|
||||||
|
length = ord(data[0])
|
||||||
|
try:
|
||||||
|
key = decryptDkeyRecord(data[1:length+1],PID)
|
||||||
|
records.append(key)
|
||||||
|
except CMBDTCError:
|
||||||
|
pass
|
||||||
|
data = data[1+length:]
|
||||||
|
|
||||||
|
return records
|
||||||
|
|
||||||
|
#
|
||||||
|
# Encryption table used to generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generatePidEncryptionTable() :
|
||||||
|
table = []
|
||||||
|
for counter1 in range (0,0x100):
|
||||||
|
value = counter1
|
||||||
|
for counter2 in range (0,8):
|
||||||
|
if (value & 1 == 0) :
|
||||||
|
value = value >> 1
|
||||||
|
else :
|
||||||
|
value = value >> 1
|
||||||
|
value = value ^ 0xEDB88320
|
||||||
|
table.append(value)
|
||||||
|
return table
|
||||||
|
|
||||||
|
#
|
||||||
|
# Seed value used to generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generatePidSeed(table,dsn) :
|
||||||
|
value = 0
|
||||||
|
for counter in range (0,4) :
|
||||||
|
index = (ord(dsn[counter]) ^ value) &0xFF
|
||||||
|
value = (value >> 8) ^ table[index]
|
||||||
|
return value
|
||||||
|
|
||||||
|
#
|
||||||
|
# Generate the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
def generateDevicePID(table,dsn,nbRoll):
|
||||||
|
seed = generatePidSeed(table,dsn)
|
||||||
|
pidAscii = ""
|
||||||
|
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
|
||||||
|
index = 0
|
||||||
|
|
||||||
|
for counter in range (0,nbRoll):
|
||||||
|
pid[index] = pid[index] ^ ord(dsn[counter])
|
||||||
|
index = (index+1) %8
|
||||||
|
|
||||||
|
for counter in range (0,8):
|
||||||
|
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
|
||||||
|
pidAscii += charMap4[index]
|
||||||
|
return pidAscii
|
||||||
|
|
||||||
|
#
|
||||||
|
# Create decrypted book payload
|
||||||
|
#
|
||||||
|
|
||||||
|
def createDecryptedPayload(payload):
|
||||||
|
|
||||||
|
# store data to be able to create the header later
|
||||||
|
headerData= []
|
||||||
|
currentOffset = 0
|
||||||
|
|
||||||
|
# Add social DRM to decrypted files
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = getKindleInfoValueForKey("kindle.name.info")+":"+ getKindleInfoValueForKey("login")
|
||||||
|
if payload!= None:
|
||||||
|
payload.write(lengthPrefixString("sdrm"))
|
||||||
|
payload.write(encodeNumber(0))
|
||||||
|
payload.write(data)
|
||||||
|
else:
|
||||||
|
currentOffset += len(lengthPrefixString("sdrm"))
|
||||||
|
currentOffset += len(encodeNumber(0))
|
||||||
|
currentOffset += len(data)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
for headerRecord in bookHeaderRecords:
|
||||||
|
name = headerRecord
|
||||||
|
newRecord = []
|
||||||
|
|
||||||
|
if name != "dkey" :
|
||||||
|
|
||||||
|
for index in range (0,len(bookHeaderRecords[name])) :
|
||||||
|
offset = currentOffset
|
||||||
|
|
||||||
|
if payload != None:
|
||||||
|
# write tag
|
||||||
|
payload.write(lengthPrefixString(name))
|
||||||
|
# write data
|
||||||
|
payload.write(encodeNumber(index))
|
||||||
|
payload.write(getBookPayloadRecord(name, index))
|
||||||
|
|
||||||
|
else :
|
||||||
|
currentOffset += len(lengthPrefixString(name))
|
||||||
|
currentOffset += len(encodeNumber(index))
|
||||||
|
currentOffset += len(getBookPayloadRecord(name, index))
|
||||||
|
newRecord.append([offset,bookHeaderRecords[name][index][1],bookHeaderRecords[name][index][2]])
|
||||||
|
|
||||||
|
headerData.append([name,newRecord])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
return headerData
|
||||||
|
|
||||||
|
#
|
||||||
|
# Create decrypted book
|
||||||
|
#
|
||||||
|
|
||||||
|
def createDecryptedBook(outputFile):
|
||||||
|
outputFile = open(outputFile,"wb")
|
||||||
|
# Write the payload in a temporary file
|
||||||
|
headerData = createDecryptedPayload(None)
|
||||||
|
outputFile.write("TPZ0")
|
||||||
|
outputFile.write(encodeNumber(len(headerData)))
|
||||||
|
|
||||||
|
for header in headerData :
|
||||||
|
outputFile.write(chr(0x63))
|
||||||
|
outputFile.write(lengthPrefixString(header[0]))
|
||||||
|
outputFile.write(encodeNumber(len(header[1])))
|
||||||
|
for numbers in header[1] :
|
||||||
|
outputFile.write(encodeNumber(numbers[0]))
|
||||||
|
outputFile.write(encodeNumber(numbers[1]))
|
||||||
|
outputFile.write(encodeNumber(numbers[2]))
|
||||||
|
|
||||||
|
outputFile.write(chr(0x64))
|
||||||
|
createDecryptedPayload(outputFile)
|
||||||
|
outputFile.close()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Set the command to execute by the programm according to cmdLine parameters
|
||||||
|
#
|
||||||
|
|
||||||
|
def setCommand(name) :
|
||||||
|
global command
|
||||||
|
if command != "" :
|
||||||
|
raise CMBDTCFatal("Invalid command line parameters")
|
||||||
|
else :
|
||||||
|
command = name
|
||||||
|
|
||||||
|
#
|
||||||
|
# Program usage
|
||||||
|
#
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print("\nUsage:")
|
||||||
|
print("\nCMBDTC.py [options] bookFileName\n")
|
||||||
|
print("-p Adds a PID to the list of PIDs that are tried to decrypt the book key (can be used several times)")
|
||||||
|
print("-d Saves a decrypted copy of the book")
|
||||||
|
print("-r Prints or writes to disk a record indicated in the form name:index (e.g \"img:0\")")
|
||||||
|
print("-o Output file name to write records and decrypted books")
|
||||||
|
print("-v Verbose (can be used several times)")
|
||||||
|
print("-i Prints kindle.info database")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
global kindleDatabase
|
||||||
|
global bookMetadata
|
||||||
|
global bookKey
|
||||||
|
global bookFile
|
||||||
|
global command
|
||||||
|
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
|
verbose = 0
|
||||||
|
recordName = ""
|
||||||
|
recordIndex = 0
|
||||||
|
outputFile = ""
|
||||||
|
PIDs = []
|
||||||
|
kindleDatabase = None
|
||||||
|
command = ""
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "vdir:o:p:")
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
# print help information and exit:
|
||||||
|
print str(err) # will print something like "option -a not recognized"
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-v":
|
||||||
|
verbose+=1
|
||||||
|
if o == "-i":
|
||||||
|
setCommand("printInfo")
|
||||||
|
if o =="-o":
|
||||||
|
if a == None :
|
||||||
|
raise CMBDTCFatal("Invalid parameter for -o")
|
||||||
|
outputFile = a
|
||||||
|
if o =="-r":
|
||||||
|
setCommand("printRecord")
|
||||||
|
try:
|
||||||
|
recordName,recordIndex = a.split(':')
|
||||||
|
except:
|
||||||
|
raise CMBDTCFatal("Invalid parameter for -r")
|
||||||
|
if o =="-p":
|
||||||
|
PIDs.append(a)
|
||||||
|
if o =="-d":
|
||||||
|
setCommand("doit")
|
||||||
|
|
||||||
|
if command == "" :
|
||||||
|
raise CMBDTCFatal("No action supplied on command line")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Read the encrypted database
|
||||||
|
#
|
||||||
|
|
||||||
|
try:
|
||||||
|
kindleDatabase = parseKindleInfo()
|
||||||
|
except Exception, message:
|
||||||
|
if verbose>0:
|
||||||
|
print(message)
|
||||||
|
|
||||||
|
if kindleDatabase != None :
|
||||||
|
if command == "printInfo" :
|
||||||
|
printKindleInfo()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute the DSN
|
||||||
|
#
|
||||||
|
|
||||||
|
# Get the Mazama Random number
|
||||||
|
MazamaRandomNumber = getKindleInfoValueForKey("MazamaRandomNumber")
|
||||||
|
|
||||||
|
# Get the HDD serial
|
||||||
|
encodedSystemVolumeSerialNumber = encodeHash(str(GetVolumeSerialNumber(GetSystemDirectory().split('\\')[0] + '\\')),charMap1)
|
||||||
|
|
||||||
|
# Get the current user name
|
||||||
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
|
# concat, hash and encode
|
||||||
|
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
||||||
|
|
||||||
|
if verbose >1:
|
||||||
|
print("DSN: " + DSN)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute the device PID
|
||||||
|
#
|
||||||
|
|
||||||
|
table = generatePidEncryptionTable()
|
||||||
|
devicePID = generateDevicePID(table,DSN,4)
|
||||||
|
PIDs.append(devicePID)
|
||||||
|
|
||||||
|
if verbose > 0:
|
||||||
|
print("Device PID: " + devicePID)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Open book and parse metadata
|
||||||
|
#
|
||||||
|
|
||||||
|
if len(args) == 1:
|
||||||
|
|
||||||
|
bookFile = openBook(args[0])
|
||||||
|
parseTopazHeader()
|
||||||
|
parseMetadata()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Compute book PID
|
||||||
|
#
|
||||||
|
|
||||||
|
# Get the account token
|
||||||
|
|
||||||
|
if kindleDatabase != None:
|
||||||
|
kindleAccountToken = getKindleInfoValueForKey("kindle.account.tokens")
|
||||||
|
|
||||||
|
if verbose >1:
|
||||||
|
print("Account Token: " + kindleAccountToken)
|
||||||
|
|
||||||
|
keysRecord = bookMetadata["keys"]
|
||||||
|
keysRecordRecord = bookMetadata[keysRecord]
|
||||||
|
|
||||||
|
pidHash = SHA1(DSN+kindleAccountToken+keysRecord+keysRecordRecord)
|
||||||
|
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
PIDs.append(bookPID)
|
||||||
|
|
||||||
|
if verbose > 0:
|
||||||
|
print ("Book PID: " + bookPID )
|
||||||
|
|
||||||
|
#
|
||||||
|
# Decrypt book key
|
||||||
|
#
|
||||||
|
|
||||||
|
dkey = getBookPayloadRecord('dkey', 0)
|
||||||
|
|
||||||
|
bookKeys = []
|
||||||
|
for PID in PIDs :
|
||||||
|
bookKeys+=decryptDkeyRecords(dkey,PID)
|
||||||
|
|
||||||
|
if len(bookKeys) == 0 :
|
||||||
|
if verbose > 0 :
|
||||||
|
print ("Book key could not be found. Maybe this book is not registered with this device.")
|
||||||
|
else :
|
||||||
|
bookKey = bookKeys[0]
|
||||||
|
if verbose > 0:
|
||||||
|
print("Book key: " + bookKey.encode('hex'))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if command == "printRecord" :
|
||||||
|
extractBookPayloadRecord(recordName,int(recordIndex),outputFile)
|
||||||
|
if outputFile != "" and verbose>0 :
|
||||||
|
print("Wrote record to file: "+outputFile)
|
||||||
|
elif command == "doit" :
|
||||||
|
if outputFile!="" :
|
||||||
|
createDecryptedBook(outputFile)
|
||||||
|
if verbose >0 :
|
||||||
|
print ("Decrypted book saved. Don't pirate!")
|
||||||
|
elif verbose > 0:
|
||||||
|
print("Output file name was not supplied.")
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main())
|
||||||
@@ -20,6 +20,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# Get a 7 bit encoded number from string. The most
|
# Get a 7 bit encoded number from string. The most
|
||||||
# significant byte comes first and has the high bit (8th) set
|
# significant byte comes first and has the high bit (8th) set
|
||||||
@@ -138,7 +140,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
@@ -243,6 +246,7 @@ class PageParser(object):
|
|||||||
'region.y' : (1, 'scalar_number', 0, 0),
|
'region.y' : (1, 'scalar_number', 0, 0),
|
||||||
'region.h' : (1, 'scalar_number', 0, 0),
|
'region.h' : (1, 'scalar_number', 0, 0),
|
||||||
'region.w' : (1, 'scalar_number', 0, 0),
|
'region.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.orientation' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'empty_text_region' : (1, 'snippets', 1, 0),
|
'empty_text_region' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
@@ -258,6 +262,13 @@ class PageParser(object):
|
|||||||
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
'word_semantic' : (1, 'snippets', 1, 1),
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -272,11 +283,21 @@ class PageParser(object):
|
|||||||
|
|
||||||
'_span' : (1, 'snippets', 1, 0),
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'-span.lastWord' : (1, 'scalar_number', 0, 0),
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'span' : (1, 'snippets', 1, 0),
|
'span' : (1, 'snippets', 1, 0),
|
||||||
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'extratokens' : (1, 'snippets', 1, 0),
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -595,28 +616,30 @@ class PageParser(object):
|
|||||||
nodename = fullpathname.pop()
|
nodename = fullpathname.pop()
|
||||||
ilvl = len(fullpathname)
|
ilvl = len(fullpathname)
|
||||||
indent = ' ' * (3 * ilvl)
|
indent = ' ' * (3 * ilvl)
|
||||||
result = indent + '<' + nodename + '>'
|
rlst = []
|
||||||
|
rlst.append(indent + '<' + nodename + '>')
|
||||||
if len(argList) > 0:
|
if len(argList) > 0:
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + ','
|
alst.append(str(j) + ',')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += 'snippets:' + argres
|
rlst.append('snippets:' + argres)
|
||||||
else :
|
else :
|
||||||
result += argres
|
rlst.append(argres)
|
||||||
if len(subtagList) > 0 :
|
if len(subtagList) > 0 :
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
result += indent + '</' + nodename + '>\n'
|
rlst.append(indent + '</' + nodename + '>\n')
|
||||||
else:
|
else:
|
||||||
result += '</' + nodename + '>\n'
|
rlst.append('</' + nodename + '>\n')
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# flatten tag
|
# flatten tag
|
||||||
@@ -625,35 +648,38 @@ class PageParser(object):
|
|||||||
subtagList = node[1]
|
subtagList = node[1]
|
||||||
argtype = node[2]
|
argtype = node[2]
|
||||||
argList = node[3]
|
argList = node[3]
|
||||||
result = name
|
rlst = []
|
||||||
|
rlst.append(name)
|
||||||
if (len(argList) > 0):
|
if (len(argList) > 0):
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + '|'
|
alst.append(str(j) + '|')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += '.snippets=' + argres
|
rlst.append('.snippets=' + argres)
|
||||||
else :
|
else :
|
||||||
result += '=' + argres
|
rlst.append('=' + argres)
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# reduce create xml output
|
# reduce create xml output
|
||||||
def formatDoc(self, flat_xml):
|
def formatDoc(self, flat_xml):
|
||||||
result = ''
|
rlst = []
|
||||||
for j in self.doc :
|
for j in self.doc :
|
||||||
if len(j) > 0:
|
if len(j) > 0:
|
||||||
if flat_xml:
|
if flat_xml:
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
else:
|
else:
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
|
result = "".join(rlst)
|
||||||
if self.debug : print result
|
if self.debug : print result
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|||||||
@@ -86,4 +86,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -43,4 +43,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -52,4 +52,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -59,8 +59,11 @@
|
|||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
# 0.19 - Modify the interface to allow use of import
|
# 0.19 - Modify the interface to allow use of import
|
||||||
# 0.20 - modify to allow use inside new interface for calibre plugins
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.20'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -140,11 +143,17 @@ logging.basicConfig()
|
|||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
raise ValueError('Invalid file format')
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
@@ -182,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -219,9 +228,15 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
|
if (sect.bkType == "Book"):
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
@@ -239,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -267,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
|
if drm_sub_version == 13:
|
||||||
encrypted_key = r[44:44+8]
|
encrypted_key = r[44:44+8]
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -356,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -368,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -389,7 +412,7 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -501,4 +524,3 @@ def main(argv=None):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -271,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -280,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -288,6 +292,11 @@ class DocParser(object):
|
|||||||
if self.fixedimage :
|
if self.fixedimage :
|
||||||
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
||||||
|
|
||||||
|
# before creating an image make sure glyph info exists
|
||||||
|
gidList = self.getData('info.glyph.glyphID',0,-1)
|
||||||
|
|
||||||
|
makeImage = makeImage & (len(gidList) > 0)
|
||||||
|
|
||||||
if not makeImage :
|
if not makeImage :
|
||||||
# standard all word paragraph
|
# standard all word paragraph
|
||||||
for wordnum in xrange(first, last):
|
for wordnum in xrange(first, last):
|
||||||
@@ -353,6 +362,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -512,13 +523,80 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
htmlpage = ''
|
tocinfo = ''
|
||||||
|
hlst = []
|
||||||
|
|
||||||
# get the ocr text
|
# get the ocr text
|
||||||
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
||||||
@@ -575,8 +653,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
# set anchor for link target on this page
|
# set anchor for link target on this page
|
||||||
if not anchorSet and not first_para_continued:
|
if not anchorSet and not first_para_continued:
|
||||||
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="'
|
hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
|
||||||
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n'
|
hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
|
||||||
anchorSet = True
|
anchorSet = True
|
||||||
|
|
||||||
# handle groups of graphics with text captions
|
# handle groups of graphics with text captions
|
||||||
@@ -585,12 +663,12 @@ class DocParser(object):
|
|||||||
if grptype != None:
|
if grptype != None:
|
||||||
if grptype == 'graphic':
|
if grptype == 'graphic':
|
||||||
gcstr = ' class="' + grptype + '"'
|
gcstr = ' class="' + grptype + '"'
|
||||||
htmlpage += '<div' + gcstr + '>'
|
hlst.append('<div' + gcstr + '>')
|
||||||
inGroup = True
|
inGroup = True
|
||||||
|
|
||||||
elif (etype == 'grpend'):
|
elif (etype == 'grpend'):
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '</div>\n'
|
hlst.append('</div>\n')
|
||||||
inGroup = False
|
inGroup = False
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -600,25 +678,25 @@ class DocParser(object):
|
|||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc)
|
hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
|
||||||
else:
|
else:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
elif regtype == 'chapterheading' :
|
elif regtype == 'chapterheading' :
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
if not breakSet:
|
if not breakSet:
|
||||||
htmlpage += '<div style="page-break-after: always;"> </div>\n'
|
hlst.append('<div style="page-break-after: always;"> </div>\n')
|
||||||
breakSet = True
|
breakSet = True
|
||||||
tag = 'h1'
|
tag = 'h1'
|
||||||
if pclass and (len(pclass) >= 7):
|
if pclass and (len(pclass) >= 7):
|
||||||
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
||||||
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
||||||
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
else:
|
else:
|
||||||
htmlpage += '<' + tag + '>'
|
hlst.append('<' + tag + '>')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
|
|
||||||
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -632,11 +710,11 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'tocentry') :
|
elif (regtype == 'tocentry') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -644,8 +722,8 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
tocinfo += self.buildTOCEntry(pdesc)
|
||||||
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'vertical') or (regtype == 'table') :
|
elif (regtype == 'vertical') or (regtype == 'table') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -655,13 +733,13 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
|
|
||||||
elif (regtype == 'synth_fcvr.center'):
|
elif (regtype == 'synth_fcvr.center'):
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
else :
|
else :
|
||||||
print ' Making region type', regtype,
|
print ' Making region type', regtype,
|
||||||
@@ -687,29 +765,29 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
else :
|
else :
|
||||||
print ' a "graphic" region'
|
print ' a "graphic" region'
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
|
|
||||||
|
htmlpage = "".join(hlst)
|
||||||
if last_para_continued :
|
if last_para_continued :
|
||||||
if htmlpage[-4:] == '</p>':
|
if htmlpage[-4:] == '</p>':
|
||||||
htmlpage = htmlpage[0:-4]
|
htmlpage = htmlpage[0:-4]
|
||||||
last_para_continued = False
|
last_para_continued = False
|
||||||
|
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
||||||
htmlpage = dp.process()
|
htmlpage, tocinfo = dp.process()
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|||||||
@@ -10,17 +10,94 @@ from struct import unpack
|
|||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
class PParser(object):
|
||||||
def __init__(self, gd, flatxml):
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
self.gd = gd
|
self.gd = gd
|
||||||
self.flatdoc = flatxml.split('\n')
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
self.temp = []
|
self.temp = []
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
self.ph = -1
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
self.pw = -1
|
||||||
self.pw = foo[0]
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
self.gx = self.getData('info.glyph.x')
|
for p in startpos:
|
||||||
self.gy = self.getData('info.glyph.y')
|
(name, argres) = self.lineinDoc(p)
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
self.ph = max(self.ph, int(argres))
|
||||||
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.pw = max(self.pw, int(argres))
|
||||||
|
|
||||||
|
if self.ph <= 0:
|
||||||
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
|
if self.pw <= 0:
|
||||||
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gx = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gy = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gid = res
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
|
# find tag in doc if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
|
else:
|
||||||
|
end = min(self.docSize, end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
|
||||||
|
# return list of start positions for the tagpath
|
||||||
|
def posinDoc(self, tagpath):
|
||||||
|
startpos = []
|
||||||
|
pos = 0
|
||||||
|
res = ""
|
||||||
|
while res != None :
|
||||||
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
|
if res != None :
|
||||||
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
def getData(self, path):
|
def getData(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.flatdoc)
|
cnt = len(self.flatdoc)
|
||||||
@@ -39,6 +116,23 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def getDataatPos(self, path, pos):
|
||||||
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
def getDataTemp(self, path):
|
def getDataTemp(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.temp)
|
cnt = len(self.temp)
|
||||||
@@ -58,6 +152,7 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getImages(self):
|
def getImages(self):
|
||||||
result = []
|
result = []
|
||||||
self.temp = self.flatdoc
|
self.temp = self.flatdoc
|
||||||
@@ -69,6 +164,7 @@ class PParser(object):
|
|||||||
src = self.getDataTemp('img.src')[0]
|
src = self.getDataTemp('img.src')[0]
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getGlyphs(self):
|
def getGlyphs(self):
|
||||||
result = []
|
result = []
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
@@ -84,68 +180,70 @@ class PParser(object):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
ml = ''
|
mlst = []
|
||||||
pp = PParser(gdict, flat_xml)
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
||||||
if (raw):
|
if (raw):
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
else:
|
else:
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
ml += '<script><![CDATA[\n'
|
mlst.append('<script><![CDATA[\n')
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
mlst.append('var dpi=%d;\n' % scaledpi)
|
||||||
if (counter) :
|
if (previd) :
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
||||||
if (counter < numfiles-1) :
|
if (nextid) :
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
||||||
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
|
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
||||||
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
|
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
|
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
||||||
ml += 'window.onload=setsize;\n'
|
mlst.append('window.onload=setsize;\n')
|
||||||
ml += ']]></script>\n'
|
mlst.append(']]></script>\n')
|
||||||
ml += '</head>\n'
|
mlst.append('</head>\n')
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
mlst.append('<div style="white-space:nowrap;">\n')
|
||||||
if (counter == 0) :
|
if previd == None:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else:
|
else:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
ml += '<defs>\n'
|
mlst.append('<defs>\n')
|
||||||
gdefs = pp.getGlyphs()
|
gdefs = pp.getGlyphs()
|
||||||
for j in xrange(0,len(gdefs)):
|
for j in xrange(0,len(gdefs)):
|
||||||
ml += gdefs[j]
|
mlst.append(gdefs[j])
|
||||||
ml += '</defs>\n'
|
mlst.append('</defs>\n')
|
||||||
img = pp.getImages()
|
img = pp.getImages()
|
||||||
if (img != None):
|
if (img != None):
|
||||||
for j in xrange(0,len(img)):
|
for j in xrange(0,len(img)):
|
||||||
ml += img[j]
|
mlst.append(img[j])
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
for j in xrange(0,len(pp.gid)):
|
for j in xrange(0,len(pp.gid)):
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
|
||||||
if (raw) :
|
if (raw) :
|
||||||
ml += '</svg>'
|
mlst.append('</svg>')
|
||||||
else :
|
else :
|
||||||
ml += '</svg></a>\n'
|
mlst.append('</svg></a>\n')
|
||||||
if (counter == numfiles - 1) :
|
if nextid == None:
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
else :
|
else :
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
|
||||||
ml += '</div>\n'
|
mlst.append('</div>\n')
|
||||||
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
|
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
|
||||||
ml += '</body>\n'
|
mlst.append('</body>\n')
|
||||||
ml += '</html>\n'
|
mlst.append('</html>\n')
|
||||||
return ml
|
return "".join(mlst)
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
if 'calibre' in sys.modules:
|
if 'calibre' in sys.modules:
|
||||||
@@ -37,6 +39,8 @@ else :
|
|||||||
import flatxml2svg
|
import flatxml2svg
|
||||||
import stylexml2css
|
import stylexml2css
|
||||||
|
|
||||||
|
# global switch
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
# Get a 7 bit encoded number from a file
|
# Get a 7 bit encoded number from a file
|
||||||
def readEncodedNumber(file):
|
def readEncodedNumber(file):
|
||||||
@@ -114,7 +118,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
def getPos(self):
|
def getPos(self):
|
||||||
@@ -295,6 +300,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
if not os.path.exists(svgDir) :
|
if not os.path.exists(svgDir) :
|
||||||
os.makedirs(svgDir)
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xmlDir = os.path.join(bookDir,'xml')
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
if not os.path.exists(xmlDir) :
|
if not os.path.exists(xmlDir) :
|
||||||
os.makedirs(xmlDir)
|
os.makedirs(xmlDir)
|
||||||
@@ -345,23 +351,38 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
authors = authors.replace('>','>')
|
authors = authors.replace('>','>')
|
||||||
meta_array['Authors'] = authors
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'metadata.xml')
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
metastr = ''
|
mlst = []
|
||||||
for key in meta_array:
|
for key in meta_array:
|
||||||
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n'
|
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
|
||||||
|
metastr = "".join(mlst)
|
||||||
|
mlst = None
|
||||||
file(xname, 'wb').write(metastr)
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
print 'Processing StyleSheet'
|
print 'Processing StyleSheet'
|
||||||
|
|
||||||
# get some scaling info from metadata to use while processing styles
|
# get some scaling info from metadata to use while processing styles
|
||||||
|
# and first page info
|
||||||
|
|
||||||
fontsize = '135'
|
fontsize = '135'
|
||||||
if 'fontSize' in meta_array:
|
if 'fontSize' in meta_array:
|
||||||
fontsize = meta_array['fontSize']
|
fontsize = meta_array['fontSize']
|
||||||
|
|
||||||
# also get the size of a normal text page
|
# also get the size of a normal text page
|
||||||
|
# get the total number of pages unpacked as a safety check
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
spage = '1'
|
spage = '1'
|
||||||
if 'firstTextPage' in meta_array:
|
if 'firstTextPage' in meta_array:
|
||||||
spage = meta_array['firstTextPage']
|
spage = meta_array['firstTextPage']
|
||||||
pnum = int(spage)
|
pnum = int(spage)
|
||||||
|
if pnum >= numfiles or pnum < 0:
|
||||||
|
# metadata is wrong so just select a page near the front
|
||||||
|
# 10% of the book to get a normal text page
|
||||||
|
pnum = int(0.10 * numfiles)
|
||||||
|
# print "first normal text page is", spage
|
||||||
|
|
||||||
# get page height and width from first text page for use in stylesheet scaling
|
# get page height and width from first text page for use in stylesheet scaling
|
||||||
pname = 'page%04d.dat' % (pnum + 1)
|
pname = 'page%04d.dat' % (pnum + 1)
|
||||||
@@ -371,12 +392,37 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
(ph, pw) = getPageDim(flat_xml)
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
if (ph == '-1') or (ph == '0') : ph = '11000'
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
if (pw == '-1') or (pw == '0') : pw = '8500'
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
# print ' ', 'other0000.dat'
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
xname = os.path.join(bookDir, 'style.css')
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
flat_xml = convert2xml.fromData(dict, otherFile)
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
file(xname, 'wb').write(cssstr)
|
file(xname, 'wb').write(cssstr)
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, 'other0000.xml')
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
@@ -398,6 +444,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
fname = os.path.join(glyphsDir,filename)
|
fname = os.path.join(glyphsDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
@@ -414,108 +461,188 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
glyfile.close()
|
glyfile.close()
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
|
||||||
# start up the html
|
# start up the html
|
||||||
|
# also build up tocentries while processing html
|
||||||
htmlFileName = "book.html"
|
htmlFileName = "book.html"
|
||||||
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
hlst = []
|
||||||
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n'
|
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
|
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
|
||||||
htmlstr += '<head>\n'
|
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
|
||||||
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
|
hlst.append('<head>\n')
|
||||||
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
|
||||||
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
|
||||||
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
|
||||||
htmlstr += '</head>\n<body>\n'
|
hlst.append('</head>\n<body>\n')
|
||||||
|
|
||||||
print 'Processing Pages'
|
print 'Processing Pages'
|
||||||
# Books are at 1440 DPI. This is rendering at twice that size for
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
# readability when rendering to the screen.
|
# readability when rendering to the screen.
|
||||||
scaledpi = 1440.0
|
scaledpi = 1440.0
|
||||||
|
|
||||||
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
|
||||||
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
|
||||||
svgindex += '<head>\n'
|
|
||||||
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
|
||||||
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
|
||||||
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
|
||||||
if 'ASIN' in meta_array:
|
|
||||||
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
|
||||||
if 'GUID' in meta_array:
|
|
||||||
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
|
||||||
svgindex += '</head>\n'
|
|
||||||
svgindex += '<body>\n'
|
|
||||||
|
|
||||||
filenames = os.listdir(pageDir)
|
filenames = os.listdir(pageDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
numfiles = len(filenames)
|
numfiles = len(filenames)
|
||||||
counter = 0
|
|
||||||
|
xmllst = []
|
||||||
|
elst = []
|
||||||
|
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
# print ' ', filename
|
# print ' ', filename
|
||||||
print ".",
|
print ".",
|
||||||
|
|
||||||
fname = os.path.join(pageDir,filename)
|
fname = os.path.join(pageDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
# first get the html
|
# first get the html
|
||||||
htmlstr += flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
elst.append(tocinfo)
|
||||||
|
hlst.append(pagehtml)
|
||||||
|
|
||||||
# now get the svg image of the page
|
# finish up the html string and output it
|
||||||
svgxml = flatxml2svg.convert2SVG(gd, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi)
|
hlst.append('</body>\n</html>\n')
|
||||||
|
htmlstr = "".join(hlst)
|
||||||
|
hlst = None
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tlst = []
|
||||||
|
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
tlst.append('<head>\n')
|
||||||
|
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
tlst.append('</head>\n')
|
||||||
|
tlst.append('<body>\n')
|
||||||
|
|
||||||
|
tlst.append('<h2>Table of Contents</h2>\n')
|
||||||
|
start = pageidnums[0]
|
||||||
if (raw):
|
if (raw):
|
||||||
pfile = open(os.path.join(svgDir,filename.replace('.dat','.svg')), 'w')
|
startname = 'page%04d.svg' % start
|
||||||
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (counter, counter)
|
|
||||||
else:
|
else:
|
||||||
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % counter), 'w')
|
startname = 'page%04d.xhtml' % start
|
||||||
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (counter, counter)
|
|
||||||
|
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
tocentries = "".join(elst)
|
||||||
|
elst = None
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
|
||||||
|
tlst.append('</body>\n')
|
||||||
|
tlst.append('</html>\n')
|
||||||
|
tochtml = "".join(tlst)
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
slst = []
|
||||||
|
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
slst.append('<head>\n')
|
||||||
|
slst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
slst.append('</head>\n')
|
||||||
|
slst.append('<body>\n')
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
slst.append('<h2>List of Pages</h2>\n')
|
||||||
|
slst.append('<div>\n')
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flst = []
|
||||||
|
for page in pagelst:
|
||||||
|
flst.append(xmllst[page])
|
||||||
|
flat_svg = "".join(flst)
|
||||||
|
flst=None
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
|
if (raw) :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
|
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
else :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
|
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
previd = pageid
|
||||||
pfile.write(svgxml)
|
pfile.write(svgxml)
|
||||||
pfile.close()
|
pfile.close()
|
||||||
|
|
||||||
counter += 1
|
counter += 1
|
||||||
|
slst.append('</div>\n')
|
||||||
|
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
|
||||||
|
slst.append('</body>\n</html>\n')
|
||||||
|
svgindex = "".join(slst)
|
||||||
|
slst = None
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
# finish up the html string and output it
|
|
||||||
htmlstr += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
|
||||||
|
|
||||||
# finish up the svg index string and output it
|
|
||||||
svgindex += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
|
||||||
|
|
||||||
# build the opf file
|
# build the opf file
|
||||||
opfname = os.path.join(bookDir, 'book.opf')
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
olst = []
|
||||||
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
|
||||||
# adding metadata
|
# adding metadata
|
||||||
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
|
||||||
if 'GUID' in meta_array:
|
if 'GUID' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
|
||||||
if 'ASIN' in meta_array:
|
if 'ASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
|
||||||
if 'oASIN' in meta_array:
|
if 'oASIN' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
|
||||||
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
|
||||||
opfstr += ' <dc:language>en</dc:language>\n'
|
olst.append(' <dc:language>en</dc:language>\n')
|
||||||
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n'
|
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <meta name="cover" content="bookcover"/>\n'
|
olst.append(' <meta name="cover" content="bookcover"/>\n')
|
||||||
opfstr += ' </metadata>\n'
|
olst.append(' </metadata>\n')
|
||||||
opfstr += '<manifest>\n'
|
olst.append('<manifest>\n')
|
||||||
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
|
||||||
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n'
|
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
|
||||||
# adding image files to manifest
|
# adding image files to manifest
|
||||||
filenames = os.listdir(imgDir)
|
filenames = os.listdir(imgDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
@@ -525,17 +652,19 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
imgext = 'jpeg'
|
imgext = 'jpeg'
|
||||||
if imgext == '.svg':
|
if imgext == '.svg':
|
||||||
imgext = 'svg+xml'
|
imgext = 'svg+xml'
|
||||||
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n'
|
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n'
|
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
|
||||||
opfstr += '</manifest>\n'
|
olst.append('</manifest>\n')
|
||||||
# adding spine
|
# adding spine
|
||||||
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n'
|
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <guide>\n'
|
olst.append(' <guide>\n')
|
||||||
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n'
|
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
|
||||||
opfstr += ' </guide>\n'
|
olst.append(' </guide>\n')
|
||||||
opfstr += '</package>\n'
|
olst.append('</package>\n')
|
||||||
|
opfstr = "".join(olst)
|
||||||
|
olst = None
|
||||||
file(opfname, 'wb').write(opfstr)
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
print 'Processing Complete'
|
print 'Processing Complete'
|
||||||
@@ -556,7 +685,6 @@ def usage():
|
|||||||
|
|
||||||
def main(argv):
|
def main(argv):
|
||||||
bookDir = ''
|
bookDir = ''
|
||||||
|
|
||||||
if len(argv) == 0:
|
if len(argv) == 0:
|
||||||
argv = sys.argv
|
argv = sys.argv
|
||||||
|
|
||||||
@@ -573,7 +701,7 @@ def main(argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
raw = 0
|
raw = 0
|
||||||
fixedimage = False
|
fixedimage = True
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o =="-h":
|
if o =="-h":
|
||||||
usage()
|
usage()
|
||||||
|
|||||||
@@ -293,6 +293,7 @@ def _load_crypto_pycrypto():
|
|||||||
return self._arc4.decrypt(data)
|
return self._arc4.decrypt(data)
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
|
MODE_CBC = _AES.MODE_CBC
|
||||||
@classmethod
|
@classmethod
|
||||||
def new(cls, userkey, mode, iv):
|
def new(cls, userkey, mode, iv):
|
||||||
self = AES()
|
self = AES()
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ from __future__ import with_statement
|
|||||||
# and many many others
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
__version__ = '3.1'
|
__version__ = '4.2'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -32,6 +32,9 @@ import sys
|
|||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import string
|
import string
|
||||||
import re
|
import re
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -74,9 +77,11 @@ def cleanup_name(name):
|
|||||||
return one
|
return one
|
||||||
|
|
||||||
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
global buildXML
|
||||||
|
|
||||||
# handle the obvious cases at the beginning
|
# handle the obvious cases at the beginning
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
print "Error: Input file does not exist"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
mobi = True
|
mobi = True
|
||||||
@@ -95,8 +100,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
print "Processing Book: ", title
|
print "Processing Book: ", title
|
||||||
filenametitle = cleanup_name(title)
|
filenametitle = cleanup_name(title)
|
||||||
outfilename = bookname
|
outfilename = bookname
|
||||||
if len(bookname)>4 and len(filenametitle)>4 and bookname[:4] != filenametitle[:4]:
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
outfilename = outfilename + "_" + filenametitle
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
# build pid list
|
# build pid list
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
@@ -106,16 +117,21 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
mb.processBook(pidlst)
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
except mobidedrm.DrmException, e:
|
except mobidedrm.DrmException, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except topazextract.TpzDRMError, e:
|
except topazextract.TpzDRMError, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if mobi:
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
elif mb.getMobiVersion() >= 8:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw3')
|
||||||
|
else:
|
||||||
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
mb.getMobiFile(outfile)
|
mb.getMobiFile(outfile)
|
||||||
return 0
|
return 0
|
||||||
@@ -125,10 +141,11 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
|||||||
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
mb.getHTMLZip(zipname)
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG HTMLZ Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
mb.getSVGZip(zipname)
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
mb.getXMLZip(zipname)
|
mb.getXMLZip(zipname)
|
||||||
@@ -158,7 +175,6 @@ def main(argv=sys.argv):
|
|||||||
print ('K4MobiDeDrm v%(__version__)s '
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
print ' '
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -196,4 +212,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
# standlone set of Mac OSX specific routines needed for K4DeDRM
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
@@ -23,6 +25,25 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('libcrypto not found')
|
raise DrmException('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
# From OpenSSL's crypto aes header
|
||||||
|
#
|
||||||
|
# AES_ENCRYPT 1
|
||||||
|
# AES_DECRYPT 0
|
||||||
|
# AES_MAXNR 14 (in bytes)
|
||||||
|
# AES_BLOCK_SIZE 16 (in bytes)
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
# note: the ivec string, and output buffer are mutable
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
c_char_pp = POINTER(c_char_p)
|
c_char_pp = POINTER(c_char_p)
|
||||||
c_int_p = POINTER(c_int)
|
c_int_p = POINTER(c_int)
|
||||||
@@ -41,6 +62,12 @@ def _load_crypto_libcrypto():
|
|||||||
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
# From OpenSSL's Crypto evp/p5_crpt2.c
|
||||||
|
#
|
||||||
|
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
|
||||||
|
# const unsigned char *salt, int saltlen, int iter,
|
||||||
|
# int keylen, unsigned char *out);
|
||||||
|
|
||||||
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
|
||||||
@@ -48,7 +75,7 @@ def _load_crypto_libcrypto():
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._blocksize = 0
|
self._blocksize = 0
|
||||||
self._keyctx = None
|
self._keyctx = None
|
||||||
self.iv = 0
|
self._iv = 0
|
||||||
|
|
||||||
def set_decrypt_key(self, userkey, iv):
|
def set_decrypt_key(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
@@ -56,23 +83,24 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('AES improper key used')
|
raise DrmException('AES improper key used')
|
||||||
return
|
return
|
||||||
keyctx = self._keyctx = AES_KEY()
|
keyctx = self._keyctx = AES_KEY()
|
||||||
self.iv = iv
|
self._iv = iv
|
||||||
|
self._userkey = userkey
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise DrmException('Failed to initialize AES key')
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
keyctx = self._keyctx
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise DrmException('AES decryption failed')
|
raise DrmException('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
def keyivgen(self, passwd, salt):
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
saltlen = len(salt)
|
saltlen = len(salt)
|
||||||
passlen = len(passwd)
|
passlen = len(passwd)
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
out = create_string_buffer(keylen)
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
return out.raw
|
return out.raw
|
||||||
@@ -114,8 +142,13 @@ def SHA256(message):
|
|||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
def encode(data, map):
|
def encode(data, map):
|
||||||
@@ -144,7 +177,7 @@ def decode(data,map):
|
|||||||
result += pack("B",value)
|
result += pack("B",value)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
# For K4M 1.6.X and later
|
||||||
# generate table of prime number less than or equal to int n
|
# generate table of prime number less than or equal to int n
|
||||||
def primes(n):
|
def primes(n):
|
||||||
if n==2: return [2]
|
if n==2: return [2]
|
||||||
@@ -166,7 +199,6 @@ def primes(n):
|
|||||||
return [2]+[x for x in s if x]
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
||||||
# returns with the serial number of drive whose BSD Name is "disk0"
|
# returns with the serial number of drive whose BSD Name is "disk0"
|
||||||
def GetVolumeSerialNumber():
|
def GetVolumeSerialNumber():
|
||||||
@@ -196,20 +228,234 @@ def GetVolumeSerialNumber():
|
|||||||
foundIt = True
|
foundIt = True
|
||||||
break
|
break
|
||||||
if not foundIt:
|
if not foundIt:
|
||||||
sernum = '9999999999'
|
sernum = ''
|
||||||
return sernum
|
return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
dpath = home + '/Library/Application Support/Kindle'
|
||||||
|
cmdline = '/sbin/mount'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
disk = ''
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.startswith('/dev'):
|
||||||
|
(devpart, mpath) = resline.split(' on ')
|
||||||
|
dpart = devpart[5:]
|
||||||
|
pp = mpath.find('(')
|
||||||
|
if pp >= 0:
|
||||||
|
mpath = mpath[:pp-1]
|
||||||
|
if dpath.startswith(mpath):
|
||||||
|
disk = dpart
|
||||||
|
return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
|
||||||
|
def GetDiskPartitionUUID(diskpart):
|
||||||
|
uuidnum = os.getenv('MYUUIDNUMBER')
|
||||||
|
if uuidnum != None:
|
||||||
|
return uuidnum
|
||||||
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
bsdname = None
|
||||||
|
uuidnum = None
|
||||||
|
foundIt = False
|
||||||
|
nest = 0
|
||||||
|
uuidnest = -1
|
||||||
|
partnest = -2
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.find('{') >= 0:
|
||||||
|
nest += 1
|
||||||
|
if resline.find('}') >= 0:
|
||||||
|
nest -= 1
|
||||||
|
pp = resline.find('"UUID" = "')
|
||||||
|
if pp >= 0:
|
||||||
|
uuidnum = resline[pp+10:-1]
|
||||||
|
uuidnum = uuidnum.strip()
|
||||||
|
uuidnest = nest
|
||||||
|
if partnest == uuidnest and uuidnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
bb = resline.find('"BSD Name" = "')
|
||||||
|
if bb >= 0:
|
||||||
|
bsdname = resline[bb+14:-1]
|
||||||
|
bsdname = bsdname.strip()
|
||||||
|
if (bsdname == diskpart):
|
||||||
|
partnest = nest
|
||||||
|
else :
|
||||||
|
partnest = -2
|
||||||
|
if partnest == uuidnest and partnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if nest == 0:
|
||||||
|
partnest = -2
|
||||||
|
uuidnest = -1
|
||||||
|
uuidnum = None
|
||||||
|
bsdname = None
|
||||||
|
if not foundIt:
|
||||||
|
uuidnum = ''
|
||||||
|
return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
|
||||||
|
macnum = os.getenv('MYMACNUM')
|
||||||
|
if macnum != None:
|
||||||
|
return macnum
|
||||||
|
cmdline = '/sbin/ifconfig en0'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
macnum = None
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
pp = resline.find('ether ')
|
||||||
|
if pp >= 0:
|
||||||
|
macnum = resline[pp+6:-1]
|
||||||
|
macnum = macnum.strip()
|
||||||
|
# print "original mac", macnum
|
||||||
|
# now munge it up the way Kindle app does
|
||||||
|
# by xoring it with 0xa5 and swapping elements 3 and 4
|
||||||
|
maclst = macnum.split(':')
|
||||||
|
n = len(maclst)
|
||||||
|
if n != 6:
|
||||||
|
fountIt = False
|
||||||
|
break
|
||||||
|
for i in range(6):
|
||||||
|
maclst[i] = int('0x' + maclst[i], 0)
|
||||||
|
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
|
||||||
|
mlst[5] = maclst[5] ^ 0xa5
|
||||||
|
mlst[4] = maclst[3] ^ 0xa5
|
||||||
|
mlst[3] = maclst[4] ^ 0xa5
|
||||||
|
mlst[2] = maclst[2] ^ 0xa5
|
||||||
|
mlst[1] = maclst[1] ^ 0xa5
|
||||||
|
mlst[0] = maclst[0] ^ 0xa5
|
||||||
|
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if not foundIt:
|
||||||
|
macnum = ''
|
||||||
|
return macnum
|
||||||
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
# uses unix env to get username instead of using sysctlbyname
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
username = os.getenv('USER')
|
username = os.getenv('USER')
|
||||||
return username
|
return username
|
||||||
|
|
||||||
|
def isNewInstall():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
# soccer game fan anyone
|
||||||
|
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
|
||||||
|
# print dpath, os.path.exists(dpath)
|
||||||
|
if os.path.exists(dpath):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
# K4Mac now has an extensive set of ids strings it uses
|
||||||
|
# in encoding pids and in creating unique passwords
|
||||||
|
# for use in its own version of CryptUnprotectDataV2
|
||||||
|
|
||||||
|
# BUT Amazon has now become nasty enough to detect when its app
|
||||||
|
# is being run under a debugger and actually changes code paths
|
||||||
|
# including which one of these strings is chosen, all to try
|
||||||
|
# to prevent reverse engineering
|
||||||
|
|
||||||
|
# Sad really ... they will only hurt their own sales ...
|
||||||
|
# true book lovers really want to keep their books forever
|
||||||
|
# and move them to their devices and DRM prevents that so they
|
||||||
|
# will just buy from someplace else that they can remove
|
||||||
|
# the DRM from
|
||||||
|
|
||||||
|
# Amazon should know by now that true book lover's are not like
|
||||||
|
# penniless kids that pirate music, we do not pirate books
|
||||||
|
|
||||||
|
if isNewInstall():
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if len(sernum) > 7:
|
||||||
|
return sernum
|
||||||
|
diskpart = GetUserHomeAppSupKindleDirParitionName()
|
||||||
|
uuidnum = GetDiskPartitionUUID(diskpart)
|
||||||
|
if len(uuidnum) > 7:
|
||||||
|
return uuidnum
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
def CryptUnprotectData(encryptedData, salt):
|
# used by Kindle for Mac versions < 1.6.0
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
class CryptUnprotectData(object):
|
||||||
|
def __init__(self):
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if sernum == '':
|
||||||
|
sernum = '9999999999'
|
||||||
|
sp = sernum + '!@#' + GetUserName()
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
passwdData = encode(SHA256(sp),charMap1)
|
||||||
|
salt = '16743'
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x3e8
|
||||||
|
keylen = 0x80
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext,charMap1)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.6.0
|
||||||
|
class CryptUnprotectDataV2(object):
|
||||||
|
def __init__(self):
|
||||||
|
sp = GetUserName() + ':&%:' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap5)
|
||||||
|
# salt generation as per the code
|
||||||
|
salt = 0x0512981d * 2 * 1 * 1
|
||||||
|
salt = str(salt) + GetUserName()
|
||||||
|
salt = encode(salt,charMap5)
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap5)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
|
||||||
|
# used in Kindle for Mac Version >= 1.9.0
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
crp = LibCrypto()
|
crp = LibCrypto()
|
||||||
key_iv = crp.keyivgen(passwdData, salt)
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
key = key_iv[0:32]
|
key = key_iv[0:32]
|
||||||
iv = key_iv[32:48]
|
iv = key_iv[32:48]
|
||||||
crp.set_decrypt_key(key,iv)
|
crp.set_decrypt_key(key,iv)
|
||||||
@@ -217,6 +463,27 @@ def CryptUnprotectData(encryptedData, salt):
|
|||||||
return cleartext
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.9.0
|
||||||
|
class CryptUnprotectDataV3(object):
|
||||||
|
def __init__(self, entropy):
|
||||||
|
sp = GetUserName() + '+@#$%+' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap2)
|
||||||
|
salt = entropy
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap2)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# Locate the .kindle-info files
|
# Locate the .kindle-info files
|
||||||
def getKindleInfoFiles(kInfoFiles):
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
# first search for current .kindle-info files
|
# first search for current .kindle-info files
|
||||||
@@ -232,18 +499,26 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
if os.path.isfile(resline):
|
if os.path.isfile(resline):
|
||||||
kInfoFiles.append(resline)
|
kInfoFiles.append(resline)
|
||||||
found = True
|
found = True
|
||||||
# For Future Reference
|
# add any .rainier*-kinf files
|
||||||
#
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
|
||||||
# # add any .kinf files
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
# cmdline = 'find "' + home + '/Library/Application Support" -name "rainier*.kinf"'
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
# cmdline = cmdline.encode(sys.getfilesystemencoding())
|
out1, out2 = p1.communicate()
|
||||||
# p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
reslst = out1.split('\n')
|
||||||
# out1, out2 = p1.communicate()
|
for resline in reslst:
|
||||||
# reslst = out1.split('\n')
|
if os.path.isfile(resline):
|
||||||
# for resline in reslst:
|
kInfoFiles.append(resline)
|
||||||
# if os.path.isfile(resline):
|
found = True
|
||||||
# kInfoFiles.append(resline)
|
# add any .kinf2011 files
|
||||||
# found = True
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
if not found:
|
if not found:
|
||||||
print('No kindle-info files have been found.')
|
print('No kindle-info files have been found.')
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
@@ -251,7 +526,7 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -261,6 +536,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
if data.find('[') != -1 :
|
if data.find('[') != -1 :
|
||||||
|
|
||||||
# older style kindle-info file
|
# older style kindle-info file
|
||||||
|
cud = CryptUnprotectData()
|
||||||
items = data.split('[')
|
items = data.split('[')
|
||||||
for item in items:
|
for item in items:
|
||||||
if item != '':
|
if item != '':
|
||||||
@@ -273,84 +549,177 @@ def getDBfromFile(kInfoFile):
|
|||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
encryptedValue = decode(rawdata,charMap2)
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
salt = '16743'
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
cleartext = CryptUnprotectData(encryptedValue, salt)
|
DB[keyname] = cleartext
|
||||||
DB[keyname] = decode(cleartext,charMap1)
|
|
||||||
cnt = cnt + 1
|
cnt = cnt + 1
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# For Future Reference taken from K4PC 1.5.0 .kinf
|
if hdr == '/':
|
||||||
#
|
|
||||||
# # else newer style .kinf file
|
# else newer style .kinf file used by K4Mac >= 1.6.0
|
||||||
# # the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# # so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
# data = data[:-1]
|
data = data[:-1]
|
||||||
# items = data.split('/')
|
items = data.split('/')
|
||||||
#
|
cud = CryptUnprotectDataV2()
|
||||||
# # loop through the item records until all are processed
|
|
||||||
# while len(items) > 0:
|
# loop through the item records until all are processed
|
||||||
#
|
while len(items) > 0:
|
||||||
# # get the first item record
|
|
||||||
# item = items.pop(0)
|
# get the first item record
|
||||||
#
|
item = items.pop(0)
|
||||||
# # the first 32 chars of the first record of a group
|
|
||||||
# # is the MD5 hash of the key name encoded by charMap5
|
# the first 32 chars of the first record of a group
|
||||||
# keyhash = item[0:32]
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
#
|
keyhash = item[0:32]
|
||||||
# # the raw keyhash string is also used to create entropy for the actual
|
keyname = "unknown"
|
||||||
# # CryptProtectData Blob that represents that keys contents
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
# "entropy" not used for K4Mac only K4PC
|
||||||
# entropy = SHA1(keyhash)
|
# entropy = SHA1(keyhash)
|
||||||
#
|
|
||||||
# # the remainder of the first record when decoded with charMap5
|
# the remainder of the first record when decoded with charMap5
|
||||||
# # has the ':' split char followed by the string representation
|
# has the ':' split char followed by the string representation
|
||||||
# # of the number of records that follow
|
# of the number of records that follow
|
||||||
# # and make up the contents
|
# and make up the contents
|
||||||
# srcnt = decode(item[34:],charMap5)
|
srcnt = decode(item[34:],charMap5)
|
||||||
# rcnt = int(srcnt)
|
rcnt = int(srcnt)
|
||||||
#
|
|
||||||
# # read and store in rcnt records of data
|
# read and store in rcnt records of data
|
||||||
# # that make up the contents value
|
# that make up the contents value
|
||||||
# edlst = []
|
edlst = []
|
||||||
# for i in xrange(rcnt):
|
for i in xrange(rcnt):
|
||||||
# item = items.pop(0)
|
item = items.pop(0)
|
||||||
# edlst.append(item)
|
edlst.append(item)
|
||||||
#
|
|
||||||
# keyname = "unknown"
|
keyname = "unknown"
|
||||||
# for name in names:
|
for name in names:
|
||||||
# if encodeHash(name,charMap5) == keyhash:
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
# keyname = name
|
keyname = name
|
||||||
# break
|
break
|
||||||
# if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
# keyname = keyhash
|
keyname = keyhash
|
||||||
#
|
|
||||||
# # the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# # of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# # to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
# # working properly, and thereby preventing the ensuing
|
# working properly, and thereby preventing the ensuing
|
||||||
# # CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
#
|
|
||||||
# # The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# # len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
# # (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
#
|
|
||||||
# # move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
# encdata = "".join(edlst)
|
encdata = "".join(edlst)
|
||||||
# contlen = len(encdata)
|
contlen = len(encdata)
|
||||||
# noffset = contlen - primes(int(contlen/3))[-1]
|
|
||||||
#
|
# now properly split and recombine
|
||||||
# # now properly split and recombine
|
# by moving noffset chars from the start of the
|
||||||
# # by moving noffset chars from the start of the
|
# string to the end of the string
|
||||||
# # string to the end of the string
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
# pfx = encdata[0:noffset]
|
pfx = encdata[0:noffset]
|
||||||
# encdata = encdata[noffset:]
|
encdata = encdata[noffset:]
|
||||||
# encdata = encdata + pfx
|
encdata = encdata + pfx
|
||||||
#
|
|
||||||
# # decode using Map5 to get the CryptProtect Data
|
# decode using charMap5 to get the CryptProtect Data
|
||||||
# encryptedValue = decode(encdata,charMap5)
|
encryptedValue = decode(encdata,charMap5)
|
||||||
# DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
# cnt = cnt + 1
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# the latest .kinf2011 version for K4M 1.9.1
|
||||||
|
# put back the hdr char, it is needed
|
||||||
|
data = hdr + data
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# the headerblob is the encrypted information needed to build the entropy string
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, charMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
|
||||||
|
# now extract the pieces in the same way
|
||||||
|
# this version is different from K4PC it scales the build number by multipying by 735
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
|
||||||
|
|
||||||
|
cud = CryptUnprotectDataV3(entropy)
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# unlike K4PC the keyhash is not used in generating entropy
|
||||||
|
# entropy = SHA1(keyhash) + added_entropy
|
||||||
|
# entropy = added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using testMap8 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
# print keyname
|
||||||
|
# print cleartext
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
if cnt == 0:
|
if cnt == 0:
|
||||||
DB = None
|
DB = None
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys, os
|
import sys, os, re
|
||||||
from struct import pack, unpack, unpack_from
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
@@ -11,9 +11,7 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
|||||||
string_at, Structure, c_void_p, cast
|
string_at, Structure, c_void_p, cast
|
||||||
|
|
||||||
import _winreg as winreg
|
import _winreg as winreg
|
||||||
|
|
||||||
MAX_PATH = 255
|
MAX_PATH = 255
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
kernel32 = windll.kernel32
|
||||||
advapi32 = windll.advapi32
|
advapi32 = windll.advapi32
|
||||||
crypt32 = windll.crypt32
|
crypt32 = windll.crypt32
|
||||||
@@ -33,6 +31,32 @@ def SHA1(message):
|
|||||||
ctx.update(message)
|
ctx.update(message)
|
||||||
return ctx.digest()
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# For K4PC 1.9.X
|
||||||
|
# use routines in alfcrypto:
|
||||||
|
# AES_cbc_encrypt
|
||||||
|
# AES_set_decrypt_key
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1
|
||||||
|
|
||||||
|
from alfcrypto import AES_CBC, KeyIVGen
|
||||||
|
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
|
key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
|
||||||
|
key = key_iv[0:32]
|
||||||
|
iv = key_iv[32:48]
|
||||||
|
aes=AES_CBC()
|
||||||
|
aes.set_decrypt_key(key, iv)
|
||||||
|
cleartext = aes.decrypt(encryptedData)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# simple primes table (<= n) calculator
|
# simple primes table (<= n) calculator
|
||||||
def primes(n):
|
def primes(n):
|
||||||
@@ -59,6 +83,10 @@ def primes(n):
|
|||||||
# Probably supposed to act as obfuscation
|
# Probably supposed to act as obfuscation
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# New maps in K4PC 1.9.0
|
||||||
|
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -122,6 +150,9 @@ def GetVolumeSerialNumber():
|
|||||||
return GetVolumeSerialNumber
|
return GetVolumeSerialNumber
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
return GetVolumeSerialNumber()
|
||||||
|
|
||||||
def getLastError():
|
def getLastError():
|
||||||
GetLastError = kernel32.GetLastError
|
GetLastError = kernel32.GetLastError
|
||||||
GetLastError.argtypes = None
|
GetLastError.argtypes = None
|
||||||
@@ -162,7 +193,8 @@ def CryptUnprotectData():
|
|||||||
outdata = DataBlob()
|
outdata = DataBlob()
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
None, None, flags, byref(outdata)):
|
None, None, flags, byref(outdata)):
|
||||||
raise DrmException("Failed to Unprotect Data")
|
# raise DrmException("Failed to Unprotect Data")
|
||||||
|
return 'failed'
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
return CryptUnprotectData
|
return CryptUnprotectData
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
@@ -173,6 +205,13 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
|
|
||||||
|
# some 64 bit machines do not have the proper registry key for some reason
|
||||||
|
# or the pythonn interface to the 32 vs 64 bit registry is broken
|
||||||
|
if 'LOCALAPPDATA' in os.environ.keys():
|
||||||
|
path = os.environ['LOCALAPPDATA']
|
||||||
|
|
||||||
|
print "searching for kinfoFiles in ", path
|
||||||
|
|
||||||
# first look for older kindle-info files
|
# first look for older kindle-info files
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
@@ -181,18 +220,34 @@ def getKindleInfoFiles(kInfoFiles):
|
|||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
||||||
|
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
print('No .kinf files have not been found.')
|
print('No K4PC 1.5.X .kinf files have not been found.')
|
||||||
else:
|
else:
|
||||||
kInfoFiles.append(kinfopath)
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.6.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.9.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
return kInfoFiles
|
return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
# determine type of kindle info provided and return a
|
# determine type of kindle info provided and return a
|
||||||
# database of keynames and values
|
# database of keynames and values
|
||||||
def getDBfromFile(kInfoFile):
|
def getDBfromFile(kInfoFile):
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
DB = {}
|
DB = {}
|
||||||
cnt = 0
|
cnt = 0
|
||||||
infoReader = open(kInfoFile, 'r')
|
infoReader = open(kInfoFile, 'r')
|
||||||
@@ -220,7 +275,8 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
# else newer style .kinf file
|
if hdr == '/':
|
||||||
|
# else rainier-2-1-1 .kinf file
|
||||||
# the .kinf file uses "/" to separate it into records
|
# the .kinf file uses "/" to separate it into records
|
||||||
# so remove the trailing "/" to make it easy to use split
|
# so remove the trailing "/" to make it easy to use split
|
||||||
data = data[:-1]
|
data = data[:-1]
|
||||||
@@ -236,7 +292,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# is the MD5 hash of the key name encoded by charMap5
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
keyhash = item[0:32]
|
keyhash = item[0:32]
|
||||||
|
|
||||||
# the raw keyhash string is also used to create entropy for the actual
|
# the raw keyhash string is used to create entropy for the actual
|
||||||
# CryptProtectData Blob that represents that keys contents
|
# CryptProtectData Blob that represents that keys contents
|
||||||
entropy = SHA1(keyhash)
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
@@ -261,7 +317,6 @@ def getDBfromFile(kInfoFile):
|
|||||||
break
|
break
|
||||||
if keyname == "unknown":
|
if keyname == "unknown":
|
||||||
keyname = keyhash
|
keyname = keyhash
|
||||||
|
|
||||||
# the charMap5 encoded contents data has had a length
|
# the charMap5 encoded contents data has had a length
|
||||||
# of chars (always odd) cut off of the front and moved
|
# of chars (always odd) cut off of the front and moved
|
||||||
# to the end to prevent decoding using charMap5 from
|
# to the end to prevent decoding using charMap5 from
|
||||||
@@ -269,7 +324,7 @@ def getDBfromFile(kInfoFile):
|
|||||||
# CryptUnprotectData call from succeeding.
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
# The offset into the charMap5 encoded contents seems to be:
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
# (in other words split "about" 2/3rds of the way through)
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
# move first offsets chars to end to align for decode by charMap5
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
@@ -293,4 +348,85 @@ def getDBfromFile(kInfoFile):
|
|||||||
DB = None
|
DB = None
|
||||||
return DB
|
return DB
|
||||||
|
|
||||||
|
# else newest .kinf2011 style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
# need to put back the first char read because it it part
|
||||||
|
# of the added entropy blob
|
||||||
|
data = hdr + data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# starts with and encoded and encrypted header blob
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, testMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
# now extract the pieces that form the added entropy
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
added_entropy = m.group(2) + m.group(4)
|
||||||
|
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the sha1 of raw keyhash string is used to create entropy along
|
||||||
|
# with the added entropy provided above from the headerblob
|
||||||
|
entropy = SHA1(keyhash) + added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
# key names now use the new testMap8 encoding
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using new testMap8 to get the original CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -22,16 +22,16 @@ else:
|
|||||||
|
|
||||||
if inCalibre:
|
if inCalibre:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
else:
|
else:
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
if sys.platform.startswith('darwin'):
|
if sys.platform.startswith('darwin'):
|
||||||
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetVolumeSerialNumber
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
@@ -218,14 +218,14 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
|
|||||||
print "Keys not found in " + kInfoFile
|
print "Keys not found in " + kInfoFile
|
||||||
return pidlst
|
return pidlst
|
||||||
|
|
||||||
# Get the HDD serial
|
# Get the ID string used
|
||||||
encodedSystemVolumeSerialNumber = encodeHash(GetVolumeSerialNumber(),charMap1)
|
encodedIDString = encodeHash(GetIDString(),charMap1)
|
||||||
|
|
||||||
# Get the current user name
|
# Get the current user name
|
||||||
encodedUsername = encodeHash(GetUserName(),charMap1)
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
# concat, hash and encode to calculate the DSN
|
# concat, hash and encode to calculate the DSN
|
||||||
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
|
||||||
|
|
||||||
# Compute the device PID (for which I can tell, is used for nothing).
|
# Compute the device PID (for which I can tell, is used for nothing).
|
||||||
table = generatePidEncryptionTable()
|
table = generatePidEncryptionTable()
|
||||||
|
|||||||
@@ -49,11 +49,16 @@
|
|||||||
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
# included in the encryption were wrong. They aren't for DOC compressed
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
# files, but they are for HUFF/CDIC compress files!
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
# 0.33 - Performance improvements for large files (concatenation)
|
||||||
|
# 0.34 - Performance improvements in decryption (libalfcrypto)
|
||||||
|
# 0.35 - add interface to get mobi_version
|
||||||
|
|
||||||
__version__ = '0.30'
|
__version__ = '0.35'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -70,6 +75,7 @@ sys.stdout=Unbuffered(sys.stdout)
|
|||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import binascii
|
import binascii
|
||||||
|
from alfcrypto import Pukall_Cipher
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,36 +87,37 @@ class DrmException(Exception):
|
|||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
return Pukall_Cipher().PC1(key,src,decryption)
|
||||||
sum2 = 0;
|
# sum1 = 0;
|
||||||
keyXorVal = 0;
|
# sum2 = 0;
|
||||||
if len(key)!=16:
|
# keyXorVal = 0;
|
||||||
print "Bad key length!"
|
# if len(key)!=16:
|
||||||
return None
|
# print "Bad key length!"
|
||||||
wkey = []
|
# return None
|
||||||
for i in xrange(8):
|
# wkey = []
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
# for i in xrange(8):
|
||||||
dst = ""
|
# wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
for i in xrange(len(src)):
|
# dst = ""
|
||||||
temp1 = 0;
|
# for i in xrange(len(src)):
|
||||||
byteXorVal = 0;
|
# temp1 = 0;
|
||||||
for j in xrange(8):
|
# byteXorVal = 0;
|
||||||
temp1 ^= wkey[j]
|
# for j in xrange(8):
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
# temp1 ^= wkey[j]
|
||||||
sum1 = (temp1*346)&0xFFFF
|
# sum2 = (sum2+j)*20021 + sum1
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
# sum1 = (temp1*346)&0xFFFF
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
# sum2 = (sum2+sum1)&0xFFFF
|
||||||
byteXorVal ^= temp1 ^ sum2
|
# temp1 = (temp1*20021+1)&0xFFFF
|
||||||
curByte = ord(src[i])
|
# byteXorVal ^= temp1 ^ sum2
|
||||||
if not decryption:
|
# curByte = ord(src[i])
|
||||||
keyXorVal = curByte * 257;
|
# if not decryption:
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
# keyXorVal = curByte * 257;
|
||||||
if decryption:
|
# curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
keyXorVal = curByte * 257;
|
# if decryption:
|
||||||
for j in xrange(8):
|
# keyXorVal = curByte * 257;
|
||||||
wkey[j] ^= keyXorVal;
|
# for j in xrange(8):
|
||||||
dst+=chr(curByte)
|
# wkey[j] ^= keyXorVal;
|
||||||
return dst
|
# dst+=chr(curByte)
|
||||||
|
# return dst
|
||||||
|
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
@@ -162,6 +169,9 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
self.mobi_data = ''
|
self.mobi_data = ''
|
||||||
@@ -192,14 +202,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if (self.mobi_version < 7) and (self.compression != 17480):
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -229,8 +240,13 @@ class MobiBook:
|
|||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -241,7 +257,10 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = ''
|
rec209 = ''
|
||||||
@@ -306,16 +325,29 @@ class MobiBook:
|
|||||||
def getMobiFile(self, outpath):
|
def getMobiFile(self, outpath):
|
||||||
file(outpath,'wb').write(self.mobi_data)
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getMobiVersion(self):
|
||||||
|
return self.mobi_version
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
self.mobi_data = self.data_file
|
self.mobi_data = self.data_file
|
||||||
return
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -343,7 +375,7 @@ class MobiBook:
|
|||||||
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
||||||
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
||||||
if not found_key:
|
if not found_key:
|
||||||
raise DrmException("No key found. Most likely the correct PID has not been given.")
|
raise DrmException("No key found. Please report this failure for help.")
|
||||||
# kill the drm keys
|
# kill the drm keys
|
||||||
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
||||||
# kill the drm pointers
|
# kill the drm pointers
|
||||||
@@ -359,18 +391,23 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
self.mobi_data = self.data_file[:self.sections[1][0]]
|
mobidataList = []
|
||||||
|
mobidataList.append(self.data_file[:self.sections[1][0]])
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
self.mobi_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
mobidataList.append(decoded_data)
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
self.mobi_data += data[-extra_size:]
|
mobidataList.append(data[-extra_size:])
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
|
mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
|
||||||
|
self.mobi_data = "".join(mobidataList)
|
||||||
print "done"
|
print "done"
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -391,9 +428,9 @@ def getUnencryptedBookWithList(infile,pidlist):
|
|||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<3 or len(argv)>4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
|
|||||||
@@ -87,4 +87,3 @@ def load_libcrypto():
|
|||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,68 @@
|
|||||||
|
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
|
||||||
|
# for details. Basically, it derives a key from a password and salt.
|
||||||
|
|
||||||
|
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
|
||||||
|
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
|
||||||
|
# This code may be freely used and modified for any purpose.
|
||||||
|
|
||||||
|
# Revision history
|
||||||
|
# v0.1 October 2004 - Initial release
|
||||||
|
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
|
||||||
|
# v0.3 "" the correct digest_size rather than always 20
|
||||||
|
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
try:
|
||||||
|
# only in python 2.5
|
||||||
|
import hashlib
|
||||||
|
sha = hashlib.sha1
|
||||||
|
md5 = hashlib.md5
|
||||||
|
sha256 = hashlib.sha256
|
||||||
|
except ImportError: # pragma: NO COVERAGE
|
||||||
|
# fallback
|
||||||
|
import sha
|
||||||
|
import md5
|
||||||
|
|
||||||
|
# this is what you want to call.
|
||||||
|
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
|
||||||
|
try:
|
||||||
|
# depending whether the hashfn is from hashlib or sha/md5
|
||||||
|
digest_size = hashfn().digest_size
|
||||||
|
except TypeError: # pragma: NO COVERAGE
|
||||||
|
digest_size = hashfn.digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
|
||||||
|
h = hmac.new( password, None, hashfn )
|
||||||
|
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, itercount, i )
|
||||||
|
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise ValueError("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
# Helper as per the spec. h is a hmac which has been created seeded with the
|
||||||
|
# password, it will be copy()ed and not modified.
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
|
||||||
|
return T
|
||||||
@@ -28,4 +28,3 @@ def load_pycrypto():
|
|||||||
i += 8
|
i += 8
|
||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
|
|||||||
@@ -75,4 +75,3 @@ class SimplePrefs(object):
|
|||||||
pass
|
pass
|
||||||
self.prefs = newprefs
|
self.prefs = newprefs
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import csv
|
|||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import getopt
|
import getopt
|
||||||
|
import re
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
@@ -81,6 +82,21 @@ class DocParser(object):
|
|||||||
pos = foundpos + 1
|
pos = foundpos + 1
|
||||||
return startpos
|
return startpos
|
||||||
|
|
||||||
|
# returns a vector of integers for the tagpath
|
||||||
|
def getData(self, tagpath, pos, end, clean=False):
|
||||||
|
if clean:
|
||||||
|
digits_only = re.compile(r'''([0-9]+)''')
|
||||||
|
argres=[]
|
||||||
|
(foundat, argt) = self.findinDoc(tagpath, pos, end)
|
||||||
|
if (argt != None) and (len(argt) > 0) :
|
||||||
|
argList = argt.split('|')
|
||||||
|
for strval in argList:
|
||||||
|
if clean:
|
||||||
|
m = re.search(digits_only, strval)
|
||||||
|
if m != None:
|
||||||
|
strval = m.group()
|
||||||
|
argres.append(int(strval))
|
||||||
|
return argres
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
@@ -237,7 +253,11 @@ def convert2CSS(flatxml, fontsize, ph, pw):
|
|||||||
|
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, fontsize, ph, pw)
|
dp = DocParser(flatxml, fontsize, ph, pw)
|
||||||
|
|
||||||
csspage = dp.process()
|
csspage = dp.process()
|
||||||
|
|
||||||
return csspage
|
return csspage
|
||||||
|
|
||||||
|
|
||||||
|
def getpageIDMap(flatxml):
|
||||||
|
dp = DocParser(flatxml, 0, 0, 0)
|
||||||
|
pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
|
||||||
|
return pageidnumbers
|
||||||
|
|||||||
@@ -146,4 +146,3 @@ class Process(object):
|
|||||||
self.__quit = True
|
self.__quit = True
|
||||||
self.__inputsem.release()
|
self.__inputsem.release()
|
||||||
self.__lock.release()
|
self.__lock.release()
|
||||||
|
|
||||||
|
|||||||
@@ -16,10 +16,13 @@ if 'calibre' in sys.modules:
|
|||||||
else:
|
else:
|
||||||
inCalibre = False
|
inCalibre = False
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import zlib, zipfile, tempfile, shutil
|
import zlib, zipfile, tempfile, shutil
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
from alfcrypto import Topaz_Cipher
|
||||||
|
|
||||||
class TpzDRMError(Exception):
|
class TpzDRMError(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -81,25 +84,28 @@ def bookReadString(fo):
|
|||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
# Context initialisation for the Topaz Crypto
|
||||||
def topazCryptoInit(key):
|
def topazCryptoInit(key):
|
||||||
ctx1 = 0x0CAFFE19E
|
return Topaz_Cipher().ctx_init(key)
|
||||||
for keyChar in key:
|
|
||||||
keyByte = ord(keyChar)
|
# ctx1 = 0x0CAFFE19E
|
||||||
ctx2 = ctx1
|
# for keyChar in key:
|
||||||
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
# keyByte = ord(keyChar)
|
||||||
return [ctx1,ctx2]
|
# ctx2 = ctx1
|
||||||
|
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
# return [ctx1,ctx2]
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
def topazCryptoDecrypt(data, ctx):
|
def topazCryptoDecrypt(data, ctx):
|
||||||
ctx1 = ctx[0]
|
return Topaz_Cipher().decrypt(data, ctx)
|
||||||
ctx2 = ctx[1]
|
# ctx1 = ctx[0]
|
||||||
plainText = ""
|
# ctx2 = ctx[1]
|
||||||
for dataChar in data:
|
# plainText = ""
|
||||||
dataByte = ord(dataChar)
|
# for dataChar in data:
|
||||||
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
# dataByte = ord(dataChar)
|
||||||
ctx2 = ctx1
|
# m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
# ctx2 = ctx1
|
||||||
plainText += chr(m)
|
# ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
return plainText
|
# plainText += chr(m)
|
||||||
|
# return plainText
|
||||||
|
|
||||||
# Decrypt data with the PID
|
# Decrypt data with the PID
|
||||||
def decryptRecord(data,PID):
|
def decryptRecord(data,PID):
|
||||||
@@ -140,6 +146,7 @@ class TopazBook:
|
|||||||
def __init__(self, filename):
|
def __init__(self, filename):
|
||||||
self.fo = file(filename, 'rb')
|
self.fo = file(filename, 'rb')
|
||||||
self.outdir = tempfile.mkdtemp()
|
self.outdir = tempfile.mkdtemp()
|
||||||
|
# self.outdir = 'rawdat'
|
||||||
self.bookPayloadOffset = 0
|
self.bookPayloadOffset = 0
|
||||||
self.bookHeaderRecords = {}
|
self.bookHeaderRecords = {}
|
||||||
self.bookMetadata = {}
|
self.bookMetadata = {}
|
||||||
@@ -380,6 +387,7 @@ def usage(progname):
|
|||||||
|
|
||||||
# Main
|
# Main
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
|
global buildXML
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
k4 = False
|
k4 = False
|
||||||
pids = []
|
pids = []
|
||||||
@@ -438,9 +446,10 @@ def main(argv=sys.argv):
|
|||||||
tb.getHTMLZip(zipname)
|
tb.getHTMLZip(zipname)
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
print " Creating SVG ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.htmlz')
|
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
||||||
tb.getSVGZip(zipname)
|
tb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
print " Creating XML ZIP Archive"
|
print " Creating XML ZIP Archive"
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
||||||
tb.getXMLZip(zipname)
|
tb.getXMLZip(zipname)
|
||||||
@@ -450,12 +459,12 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
except TpzDRMError, e:
|
except TpzDRMError, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup()
|
# tb.cleanup()
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
tb.cleanup
|
# tb.cleanup
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
@@ -464,4 +473,3 @@ def main(argv=sys.argv):
|
|||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -152,5 +152,3 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
if __name__ == '__main__' :
|
if __name__ == '__main__' :
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
ReadMe_DeDRM_WinApp_vX.X
|
ReadMe_DeDRM_vX.X_WinApp
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|
||||||
DeDRM_WinApp is a pure python drag and drop application that allows users to drag and drop ebooks or folders of ebooks onto theDeDRM_Drop_Target to have the DRM removed. It repackages the"tools" python software in one easy to use program.
|
DeDRM_vX.X_WinApp is a pure python drag and drop application that allows users to drag and drop ebooks or folders of ebooks onto the DeDRM_Drop_Target to have the DRM removed. It repackages the"tools" python software in one easy to use program that remembers preferences and settings.
|
||||||
|
|
||||||
It should work out of the box with Kindle for PC ebooks and Adobe Adept epub and pdf ebooks.
|
It should work out of the box with Kindle for PC ebooks and Adobe Adept epub and pdf ebooks.
|
||||||
|
|
||||||
@@ -21,9 +21,9 @@ This program requires that the proper 32 bit version of Python 2.X (tested with
|
|||||||
Installation
|
Installation
|
||||||
------------
|
------------
|
||||||
|
|
||||||
1. Download the latest DeDRM_WinApp_vx.x.zip and fully Extract its contents.
|
1. From tools_vX.X\DeDRM_Applications\, right click on DeDRM_v_X.X_WinApp.zip and fully Extract its contents.
|
||||||
|
|
||||||
2. Move the resulting DeDRM_WinApp_vX.X folder to whereever you keep you other programs.
|
2. Move the resulting DeDRM_vX.X_WinApp folder to whereever you keep you other programs.
|
||||||
(I typically use an "Applications" folder inside of my home directory)
|
(I typically use an "Applications" folder inside of my home directory)
|
||||||
|
|
||||||
3. Open the folder, and create a short-cut to DeDRM_Drop_Target and move that short-cut to your Desktop.
|
3. Open the folder, and create a short-cut to DeDRM_Drop_Target and move that short-cut to your Desktop.
|
||||||
@@ -33,19 +33,18 @@ Installation
|
|||||||
|
|
||||||
If you already have a correct version of Python and PyCrypto installed and in your path, you are ready to go!
|
If you already have a correct version of Python and PyCrypto installed and in your path, you are ready to go!
|
||||||
|
|
||||||
|
If not, see below.
|
||||||
|
|
||||||
If not, see where you can get these additional pieces.
|
|
||||||
|
|
||||||
|
|
||||||
Installing Python on Windows
|
Installing Python on Windows
|
||||||
----------------------------
|
----------------------------
|
||||||
I strongly recommend installing ActiveState’s Active Python, Community Edition for Windows (x86) 32 bits. This is a free, full version of the Python. It comes with some important additional modules that are not included in the bare-bones version from www.python.org unless you choose to install everything.
|
I strongly recommend fully installing ActiveState’s Active Python, free Community Edition for Windows (x86) 32 bits. This is a free, full version of the Python. It comes with some important additional modules that are not included in the bare-bones version from www.python.org unless you choose to install everything.
|
||||||
|
|
||||||
1. Download ActivePython 2.7.1 for Windows (x86) (or later 2.7 version for Windows (x86) ) from http://www.activestate.com/activepython/downloads. Do not download the ActivePython 2.7.1 for Windows (64-bit, x64) verson, even if you are running 64-bit Windows.
|
1. Download ActivePython 2.7.X for Windows (x86) (or later 2.7 version for Windows (x86) ) from http://www.activestate.com/activepython/downloads. Do not download the ActivePython 2.7.X for Windows (64-bit, x64) verson, even if you are running 64-bit Windows.
|
||||||
|
|
||||||
2. When it has finished downloading, run the installer. Accept the default options.
|
2. When it has finished downloading, run the installer. Accept the default options.
|
||||||
|
|
||||||
|
|
||||||
Installing PyCrypto on Windows
|
Installing PyCrypto on Windows
|
||||||
------------------------------
|
------------------------------
|
||||||
PyCrypto is a set of encryption/decryption routines that work with Python. The sources are freely available, and compiled versions are available from several sources. You must install a version that is for 32-bit Windows and Python 2.7. I recommend the installer linked from Michael Foord’s blog.
|
PyCrypto is a set of encryption/decryption routines that work with Python. The sources are freely available, and compiled versions are available from several sources. You must install a version that is for 32-bit Windows and Python 2.7. I recommend the installer linked from Michael Foord’s blog.
|
||||||
|
|||||||
@@ -1,151 +0,0 @@
|
|||||||
#! /usr/bin/python
|
|
||||||
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import csv
|
|
||||||
import os
|
|
||||||
import getopt
|
|
||||||
from struct import pack
|
|
||||||
from struct import unpack
|
|
||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
|
||||||
def __init__(self, gd, flatxml):
|
|
||||||
self.gd = gd
|
|
||||||
self.flatdoc = flatxml.split('\n')
|
|
||||||
self.temp = []
|
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
|
||||||
self.pw = foo[0]
|
|
||||||
self.gx = self.getData('info.glyph.x')
|
|
||||||
self.gy = self.getData('info.glyph.y')
|
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
|
||||||
def getData(self, path):
|
|
||||||
result = None
|
|
||||||
cnt = len(self.flatdoc)
|
|
||||||
for j in xrange(cnt):
|
|
||||||
item = self.flatdoc[j]
|
|
||||||
if item.find('=') >= 0:
|
|
||||||
(name, argt) = item.split('=')
|
|
||||||
argres = argt.split('|')
|
|
||||||
else:
|
|
||||||
name = item
|
|
||||||
argres = []
|
|
||||||
if (name.endswith(path)):
|
|
||||||
result = argres
|
|
||||||
break
|
|
||||||
if (len(argres) > 0) :
|
|
||||||
for j in xrange(0,len(argres)):
|
|
||||||
argres[j] = int(argres[j])
|
|
||||||
return result
|
|
||||||
def getDataTemp(self, path):
|
|
||||||
result = None
|
|
||||||
cnt = len(self.temp)
|
|
||||||
for j in xrange(cnt):
|
|
||||||
item = self.temp[j]
|
|
||||||
if item.find('=') >= 0:
|
|
||||||
(name, argt) = item.split('=')
|
|
||||||
argres = argt.split('|')
|
|
||||||
else:
|
|
||||||
name = item
|
|
||||||
argres = []
|
|
||||||
if (name.endswith(path)):
|
|
||||||
result = argres
|
|
||||||
self.temp.pop(j)
|
|
||||||
break
|
|
||||||
if (len(argres) > 0) :
|
|
||||||
for j in xrange(0,len(argres)):
|
|
||||||
argres[j] = int(argres[j])
|
|
||||||
return result
|
|
||||||
def getImages(self):
|
|
||||||
result = []
|
|
||||||
self.temp = self.flatdoc
|
|
||||||
while (self.getDataTemp('img') != None):
|
|
||||||
h = self.getDataTemp('img.h')[0]
|
|
||||||
w = self.getDataTemp('img.w')[0]
|
|
||||||
x = self.getDataTemp('img.x')[0]
|
|
||||||
y = self.getDataTemp('img.y')[0]
|
|
||||||
src = self.getDataTemp('img.src')[0]
|
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
|
||||||
return result
|
|
||||||
def getGlyphs(self):
|
|
||||||
result = []
|
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
|
||||||
glyphs = []
|
|
||||||
for j in set(self.gid):
|
|
||||||
glyphs.append(j)
|
|
||||||
glyphs.sort()
|
|
||||||
for gid in glyphs:
|
|
||||||
id='id="gl%d"' % gid
|
|
||||||
path = self.gd.lookup(id)
|
|
||||||
if path:
|
|
||||||
result.append(id + ' ' + path)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
|
||||||
ml = ''
|
|
||||||
pp = PParser(gdict, flat_xml)
|
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
|
||||||
if (raw):
|
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
|
||||||
else:
|
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
|
||||||
ml += '<script><![CDATA[\n'
|
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
|
||||||
if (counter) :
|
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
|
||||||
if (counter < numfiles-1) :
|
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
|
||||||
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
|
|
||||||
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
|
|
||||||
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
|
|
||||||
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
|
|
||||||
ml += 'window.onload=setsize;\n'
|
|
||||||
ml += ']]></script>\n'
|
|
||||||
ml += '</head>\n'
|
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
|
||||||
if (counter == 0) :
|
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
|
||||||
else:
|
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
|
||||||
if (pp.gid != None):
|
|
||||||
ml += '<defs>\n'
|
|
||||||
gdefs = pp.getGlyphs()
|
|
||||||
for j in xrange(0,len(gdefs)):
|
|
||||||
ml += gdefs[j]
|
|
||||||
ml += '</defs>\n'
|
|
||||||
img = pp.getImages()
|
|
||||||
if (img != None):
|
|
||||||
for j in xrange(0,len(img)):
|
|
||||||
ml += img[j]
|
|
||||||
if (pp.gid != None):
|
|
||||||
for j in xrange(0,len(pp.gid)):
|
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
|
||||||
if (raw) :
|
|
||||||
ml += '</svg>'
|
|
||||||
else :
|
|
||||||
ml += '</svg></a>\n'
|
|
||||||
if (counter == numfiles - 1) :
|
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
|
||||||
else :
|
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
|
||||||
ml += '</div>\n'
|
|
||||||
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
|
|
||||||
ml += '</body>\n'
|
|
||||||
ml += '</html>\n'
|
|
||||||
return ml
|
|
||||||
|
|
||||||
@@ -1,357 +0,0 @@
|
|||||||
# standlone set of Mac OSX specific routines needed for K4DeDRM
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from struct import pack, unpack, unpack_from
|
|
||||||
|
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
# interface to needed routines in openssl's libcrypto
|
|
||||||
def _load_crypto_libcrypto():
|
|
||||||
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
|
||||||
Structure, c_ulong, create_string_buffer, addressof, string_at, cast
|
|
||||||
from ctypes.util import find_library
|
|
||||||
|
|
||||||
libcrypto = find_library('crypto')
|
|
||||||
if libcrypto is None:
|
|
||||||
raise DrmException('libcrypto not found')
|
|
||||||
libcrypto = CDLL(libcrypto)
|
|
||||||
|
|
||||||
AES_MAXNR = 14
|
|
||||||
c_char_pp = POINTER(c_char_p)
|
|
||||||
c_int_p = POINTER(c_int)
|
|
||||||
|
|
||||||
class AES_KEY(Structure):
|
|
||||||
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
|
||||||
AES_KEY_p = POINTER(AES_KEY)
|
|
||||||
|
|
||||||
def F(restype, name, argtypes):
|
|
||||||
func = getattr(libcrypto, name)
|
|
||||||
func.restype = restype
|
|
||||||
func.argtypes = argtypes
|
|
||||||
return func
|
|
||||||
|
|
||||||
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
|
|
||||||
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
|
||||||
|
|
||||||
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
|
||||||
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
|
||||||
|
|
||||||
class LibCrypto(object):
|
|
||||||
def __init__(self):
|
|
||||||
self._blocksize = 0
|
|
||||||
self._keyctx = None
|
|
||||||
self.iv = 0
|
|
||||||
|
|
||||||
def set_decrypt_key(self, userkey, iv):
|
|
||||||
self._blocksize = len(userkey)
|
|
||||||
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
|
||||||
raise DrmException('AES improper key used')
|
|
||||||
return
|
|
||||||
keyctx = self._keyctx = AES_KEY()
|
|
||||||
self.iv = iv
|
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
|
||||||
if rv < 0:
|
|
||||||
raise DrmException('Failed to initialize AES key')
|
|
||||||
|
|
||||||
def decrypt(self, data):
|
|
||||||
out = create_string_buffer(len(data))
|
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
|
||||||
if rv == 0:
|
|
||||||
raise DrmException('AES decryption failed')
|
|
||||||
return out.raw
|
|
||||||
|
|
||||||
def keyivgen(self, passwd, salt):
|
|
||||||
saltlen = len(salt)
|
|
||||||
passlen = len(passwd)
|
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
|
||||||
return out.raw
|
|
||||||
return LibCrypto
|
|
||||||
|
|
||||||
def _load_crypto():
|
|
||||||
LibCrypto = None
|
|
||||||
try:
|
|
||||||
LibCrypto = _load_crypto_libcrypto()
|
|
||||||
except (ImportError, DrmException):
|
|
||||||
pass
|
|
||||||
return LibCrypto
|
|
||||||
|
|
||||||
LibCrypto = _load_crypto()
|
|
||||||
|
|
||||||
#
|
|
||||||
# Utility Routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# crypto digestroutines
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
def MD5(message):
|
|
||||||
ctx = hashlib.md5()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
def SHA1(message):
|
|
||||||
ctx = hashlib.sha1()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
def SHA256(message):
|
|
||||||
ctx = hashlib.sha256()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
|
||||||
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
|
||||||
|
|
||||||
|
|
||||||
def encode(data, map):
|
|
||||||
result = ""
|
|
||||||
for char in data:
|
|
||||||
value = ord(char)
|
|
||||||
Q = (value ^ 0x80) // len(map)
|
|
||||||
R = value % len(map)
|
|
||||||
result += map[Q]
|
|
||||||
result += map[R]
|
|
||||||
return result
|
|
||||||
|
|
||||||
# Hash the bytes in data and then encode the digest with the characters in map
|
|
||||||
def encodeHash(data,map):
|
|
||||||
return encode(MD5(data),map)
|
|
||||||
|
|
||||||
# Decode the string in data with the characters in map. Returns the decoded bytes
|
|
||||||
def decode(data,map):
|
|
||||||
result = ""
|
|
||||||
for i in range (0,len(data)-1,2):
|
|
||||||
high = map.find(data[i])
|
|
||||||
low = map.find(data[i+1])
|
|
||||||
if (high == -1) or (low == -1) :
|
|
||||||
break
|
|
||||||
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
|
||||||
result += pack("B",value)
|
|
||||||
return result
|
|
||||||
|
|
||||||
# For Future Reference from .kinf approach of K4PC
|
|
||||||
# generate table of prime number less than or equal to int n
|
|
||||||
def primes(n):
|
|
||||||
if n==2: return [2]
|
|
||||||
elif n<2: return []
|
|
||||||
s=range(3,n+1,2)
|
|
||||||
mroot = n ** 0.5
|
|
||||||
half=(n+1)/2-1
|
|
||||||
i=0
|
|
||||||
m=3
|
|
||||||
while m <= mroot:
|
|
||||||
if s[i]:
|
|
||||||
j=(m*m-3)/2
|
|
||||||
s[j]=0
|
|
||||||
while j<half:
|
|
||||||
s[j]=0
|
|
||||||
j+=m
|
|
||||||
i=i+1
|
|
||||||
m=2*i+3
|
|
||||||
return [2]+[x for x in s if x]
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
|
||||||
# returns with the serial number of drive whose BSD Name is "disk0"
|
|
||||||
def GetVolumeSerialNumber():
|
|
||||||
sernum = os.getenv('MYSERIALNUMBER')
|
|
||||||
if sernum != None:
|
|
||||||
return sernum
|
|
||||||
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
|
||||||
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
|
||||||
out1, out2 = p.communicate()
|
|
||||||
reslst = out1.split('\n')
|
|
||||||
cnt = len(reslst)
|
|
||||||
bsdname = None
|
|
||||||
sernum = None
|
|
||||||
foundIt = False
|
|
||||||
for j in xrange(cnt):
|
|
||||||
resline = reslst[j]
|
|
||||||
pp = resline.find('"Serial Number" = "')
|
|
||||||
if pp >= 0:
|
|
||||||
sernum = resline[pp+19:-1]
|
|
||||||
sernum = sernum.strip()
|
|
||||||
bb = resline.find('"BSD Name" = "')
|
|
||||||
if bb >= 0:
|
|
||||||
bsdname = resline[bb+14:-1]
|
|
||||||
bsdname = bsdname.strip()
|
|
||||||
if (bsdname == 'disk0') and (sernum != None):
|
|
||||||
foundIt = True
|
|
||||||
break
|
|
||||||
if not foundIt:
|
|
||||||
sernum = '9999999999'
|
|
||||||
return sernum
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
|
||||||
def GetUserName():
|
|
||||||
username = os.getenv('USER')
|
|
||||||
return username
|
|
||||||
|
|
||||||
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
|
||||||
def CryptUnprotectData(encryptedData, salt):
|
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
|
||||||
crp = LibCrypto()
|
|
||||||
key_iv = crp.keyivgen(passwdData, salt)
|
|
||||||
key = key_iv[0:32]
|
|
||||||
iv = key_iv[32:48]
|
|
||||||
crp.set_decrypt_key(key,iv)
|
|
||||||
cleartext = crp.decrypt(encryptedData)
|
|
||||||
return cleartext
|
|
||||||
|
|
||||||
|
|
||||||
# Locate the .kindle-info files
|
|
||||||
def getKindleInfoFiles(kInfoFiles):
|
|
||||||
# first search for current .kindle-info files
|
|
||||||
home = os.getenv('HOME')
|
|
||||||
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
|
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
|
||||||
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
|
||||||
out1, out2 = p1.communicate()
|
|
||||||
reslst = out1.split('\n')
|
|
||||||
kinfopath = 'NONE'
|
|
||||||
found = False
|
|
||||||
for resline in reslst:
|
|
||||||
if os.path.isfile(resline):
|
|
||||||
kInfoFiles.append(resline)
|
|
||||||
found = True
|
|
||||||
# For Future Reference
|
|
||||||
#
|
|
||||||
# # add any .kinf files
|
|
||||||
# cmdline = 'find "' + home + '/Library/Application Support" -name "rainier*.kinf"'
|
|
||||||
# cmdline = cmdline.encode(sys.getfilesystemencoding())
|
|
||||||
# p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
|
||||||
# out1, out2 = p1.communicate()
|
|
||||||
# reslst = out1.split('\n')
|
|
||||||
# for resline in reslst:
|
|
||||||
# if os.path.isfile(resline):
|
|
||||||
# kInfoFiles.append(resline)
|
|
||||||
# found = True
|
|
||||||
if not found:
|
|
||||||
print('No kindle-info files have been found.')
|
|
||||||
return kInfoFiles
|
|
||||||
|
|
||||||
# determine type of kindle info provided and return a
|
|
||||||
# database of keynames and values
|
|
||||||
def getDBfromFile(kInfoFile):
|
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
|
||||||
DB = {}
|
|
||||||
cnt = 0
|
|
||||||
infoReader = open(kInfoFile, 'r')
|
|
||||||
hdr = infoReader.read(1)
|
|
||||||
data = infoReader.read()
|
|
||||||
|
|
||||||
if data.find('[') != -1 :
|
|
||||||
|
|
||||||
# older style kindle-info file
|
|
||||||
items = data.split('[')
|
|
||||||
for item in items:
|
|
||||||
if item != '':
|
|
||||||
keyhash, rawdata = item.split(':')
|
|
||||||
keyname = "unknown"
|
|
||||||
for name in names:
|
|
||||||
if encodeHash(name,charMap2) == keyhash:
|
|
||||||
keyname = name
|
|
||||||
break
|
|
||||||
if keyname == "unknown":
|
|
||||||
keyname = keyhash
|
|
||||||
encryptedValue = decode(rawdata,charMap2)
|
|
||||||
salt = '16743'
|
|
||||||
cleartext = CryptUnprotectData(encryptedValue, salt)
|
|
||||||
DB[keyname] = decode(cleartext,charMap1)
|
|
||||||
cnt = cnt + 1
|
|
||||||
if cnt == 0:
|
|
||||||
DB = None
|
|
||||||
return DB
|
|
||||||
|
|
||||||
# For Future Reference taken from K4PC 1.5.0 .kinf
|
|
||||||
#
|
|
||||||
# # else newer style .kinf file
|
|
||||||
# # the .kinf file uses "/" to separate it into records
|
|
||||||
# # so remove the trailing "/" to make it easy to use split
|
|
||||||
# data = data[:-1]
|
|
||||||
# items = data.split('/')
|
|
||||||
#
|
|
||||||
# # loop through the item records until all are processed
|
|
||||||
# while len(items) > 0:
|
|
||||||
#
|
|
||||||
# # get the first item record
|
|
||||||
# item = items.pop(0)
|
|
||||||
#
|
|
||||||
# # the first 32 chars of the first record of a group
|
|
||||||
# # is the MD5 hash of the key name encoded by charMap5
|
|
||||||
# keyhash = item[0:32]
|
|
||||||
#
|
|
||||||
# # the raw keyhash string is also used to create entropy for the actual
|
|
||||||
# # CryptProtectData Blob that represents that keys contents
|
|
||||||
# entropy = SHA1(keyhash)
|
|
||||||
#
|
|
||||||
# # the remainder of the first record when decoded with charMap5
|
|
||||||
# # has the ':' split char followed by the string representation
|
|
||||||
# # of the number of records that follow
|
|
||||||
# # and make up the contents
|
|
||||||
# srcnt = decode(item[34:],charMap5)
|
|
||||||
# rcnt = int(srcnt)
|
|
||||||
#
|
|
||||||
# # read and store in rcnt records of data
|
|
||||||
# # that make up the contents value
|
|
||||||
# edlst = []
|
|
||||||
# for i in xrange(rcnt):
|
|
||||||
# item = items.pop(0)
|
|
||||||
# edlst.append(item)
|
|
||||||
#
|
|
||||||
# keyname = "unknown"
|
|
||||||
# for name in names:
|
|
||||||
# if encodeHash(name,charMap5) == keyhash:
|
|
||||||
# keyname = name
|
|
||||||
# break
|
|
||||||
# if keyname == "unknown":
|
|
||||||
# keyname = keyhash
|
|
||||||
#
|
|
||||||
# # the charMap5 encoded contents data has had a length
|
|
||||||
# # of chars (always odd) cut off of the front and moved
|
|
||||||
# # to the end to prevent decoding using charMap5 from
|
|
||||||
# # working properly, and thereby preventing the ensuing
|
|
||||||
# # CryptUnprotectData call from succeeding.
|
|
||||||
#
|
|
||||||
# # The offset into the charMap5 encoded contents seems to be:
|
|
||||||
# # len(contents) - largest prime number less than or equal to int(len(content)/3)
|
|
||||||
# # (in other words split "about" 2/3rds of the way through)
|
|
||||||
#
|
|
||||||
# # move first offsets chars to end to align for decode by charMap5
|
|
||||||
# encdata = "".join(edlst)
|
|
||||||
# contlen = len(encdata)
|
|
||||||
# noffset = contlen - primes(int(contlen/3))[-1]
|
|
||||||
#
|
|
||||||
# # now properly split and recombine
|
|
||||||
# # by moving noffset chars from the start of the
|
|
||||||
# # string to the end of the string
|
|
||||||
# pfx = encdata[0:noffset]
|
|
||||||
# encdata = encdata[noffset:]
|
|
||||||
# encdata = encdata + pfx
|
|
||||||
#
|
|
||||||
# # decode using Map5 to get the CryptProtect Data
|
|
||||||
# encryptedValue = decode(encdata,charMap5)
|
|
||||||
# DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
|
||||||
# cnt = cnt + 1
|
|
||||||
|
|
||||||
if cnt == 0:
|
|
||||||
DB = None
|
|
||||||
return DB
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
Kindle for iPhone, iPod Touch, iPad
|
|
||||||
|
|
||||||
|
|
||||||
The Kindle application for iOS (iPhone/iPod Touch/iPad) uses a PID derived from the serial number of the iPhone/iPod Touch/iPad. Kindlepid.py is a python script that turns the serial number into the equivalent PID, which can then be used with the MobiDeDRM script.
|
|
||||||
|
|
||||||
So, to remove the DRM from (Mobipocket) Kindle books downloaded to your iPhone/iPodTouch/iPad, you’ll need the latest toolsvx.x.zip archive and
|
|
||||||
some way to extract the book files from the backup of your device on your computer. There are several free tools around to do this.
|
|
||||||
|
|
||||||
Double-click on KindlePID.pyw to get your device’s PID, then use the extractor to get your book files, then double-click on MobiDeDRM.pyw with the PID and the files to get Drm-free versions of your books.
|
|
||||||
|
|
||||||
Kindlefix gives you another way to use the PID generated by kindlepid. Some ebook stores and libraries will accept the PIDs generated by kindlepid (some won’t), and you can then download ebooks from the store or library encrypted to work with your Kindle. Except they don’t. There’s a flag byte set in encrypted Kindle ebooks, and Kindles and the Kindle app won’t read encrypted mobipocket ebooks unless that flag is set. Kindlefix will set that flag for you. If your library have Mobipocket ebooks to lend and will accept your Kindle’s PID, you can now check out library ebooks, run kindlefix on them, and then read them on your Kindle, and when your loan period ends, they’ll automatically become unreadable.
|
|
||||||
|
|
||||||
To extract the files from your iPod Touch (not archived Kindle ebooks, but ones actually on your iPod Touch) it’s necessary to first do a back-up of the iPod Touch using iTunes. That creates a backup file on your Mac, and you can then extract the Mobipocket files from that using iPhone/ipod Touch Backup Extractor – free software from here: http://supercrazyawesome.com/
|
|
||||||
|
|
||||||
Ok, so that will get your the .azw Kindle Mobipocket files.
|
|
||||||
|
|
||||||
Now you need the PID used to encrypt them. To get that you’ll need your iPod Touch UDID number – you can find it in iTunes when your iPod Touch is connected in the Summary page – click on the serial number and it changes to the (40 digit!) UDID.
|
|
||||||
|
|
||||||
And then you need to double-click the KindlePID.pyw script and enter your 40 digit UDID in the window and hit "Start".
|
|
||||||
|
|
||||||
and you should get back a response something like:
|
|
||||||
|
|
||||||
Mobipocket PID calculator for Amazon Kindle. Copyright (c) 2007, 2009 Igor Skochinsky
|
|
||||||
iPhone serial number (UDID) detected
|
|
||||||
Mobipocked PID for iPhone serial# FOURTYDIGITUDIDNUMBERGIVENHERE is TENDIGITPID
|
|
||||||
|
|
||||||
which gives you the PID to be used with MobiDeDRM to de-drm the files you extracted.
|
|
||||||
|
|
||||||
All of these scripts are gui python programs. Python 2.X (32 bit) is already installed in Mac OSX and Linux. We recommend ActiveState's Active Python Version 2.X (32 bit) for Windows users.
|
|
||||||
@@ -36,6 +36,7 @@ __license__ = 'GPL v3'
|
|||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
os.environ['PYTHONIOENCODING'] = "utf-8"
|
||||||
import re
|
import re
|
||||||
import shutil
|
import shutil
|
||||||
import Tkinter
|
import Tkinter
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
import sys
|
import sys
|
||||||
sys.path.append('lib')
|
sys.path.append('lib')
|
||||||
import os, os.path, urllib
|
import os, os.path, urllib
|
||||||
|
os.environ['PYTHONIOENCODING'] = "utf-8"
|
||||||
import subprocess
|
import subprocess
|
||||||
from subprocess import Popen, PIPE, STDOUT
|
from subprocess import Popen, PIPE, STDOUT
|
||||||
import subasyncio
|
import subasyncio
|
||||||
@@ -73,7 +74,6 @@ class MainDialog(Tkinter.Frame):
|
|||||||
# post output from subprocess in scrolled text widget
|
# post output from subprocess in scrolled text widget
|
||||||
def showCmdOutput(self, msg):
|
def showCmdOutput(self, msg):
|
||||||
if msg and msg !='':
|
if msg and msg !='':
|
||||||
msg = msg.encode('utf-8')
|
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
msg = msg.replace('\r\n','\n')
|
msg = msg.replace('\r\n','\n')
|
||||||
self.stext.insert(Tkconstants.END,msg)
|
self.stext.insert(Tkconstants.END,msg)
|
||||||
@@ -83,14 +83,19 @@ class MainDialog(Tkinter.Frame):
|
|||||||
# run as a subprocess via pipes and collect stdout
|
# run as a subprocess via pipes and collect stdout
|
||||||
def pidrdr(self, serial):
|
def pidrdr(self, serial):
|
||||||
# os.putenv('PYTHONUNBUFFERED', '1')
|
# os.putenv('PYTHONUNBUFFERED', '1')
|
||||||
cmdline = 'python ./lib/kindlepid.py "' + serial + '"'
|
pengine = sys.executable
|
||||||
|
if pengine is None or pengine == '':
|
||||||
|
pengine = "python"
|
||||||
|
pengine = os.path.normpath(pengine)
|
||||||
|
cmdline = pengine + ' ./lib/kindlepid.py "' + serial + '"'
|
||||||
if sys.platform[0:3] == 'win':
|
if sys.platform[0:3] == 'win':
|
||||||
search_path = os.environ['PATH']
|
# search_path = os.environ['PATH']
|
||||||
search_path = search_path.lower()
|
# search_path = search_path.lower()
|
||||||
if search_path.find('python') >= 0:
|
# if search_path.find('python') >= 0:
|
||||||
cmdline = 'python lib\kindlepid.py "' + serial + '"'
|
# cmdline = 'python lib\kindlepid.py "' + serial + '"'
|
||||||
else :
|
# else :
|
||||||
cmdline = 'lib\kindlepid.py "' + serial + '"'
|
# cmdline = 'lib\kindlepid.py "' + serial + '"'
|
||||||
|
cmdline = pengine + ' lib\\kindlepid.py "' + serial + '"'
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
p2 = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=False)
|
p2 = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=PIPE, stderr=PIPE, close_fds=False)
|
||||||
return p2
|
return p2
|
||||||
@@ -116,7 +121,6 @@ class MainDialog(Tkinter.Frame):
|
|||||||
log += 'Serial = "' + serial + '"\n'
|
log += 'Serial = "' + serial + '"\n'
|
||||||
log += '\n\n'
|
log += '\n\n'
|
||||||
log += 'Please Wait ...\n\n'
|
log += 'Please Wait ...\n\n'
|
||||||
log = log.encode('utf-8')
|
|
||||||
self.stext.insert(Tkconstants.END,log)
|
self.stext.insert(Tkconstants.END,log)
|
||||||
self.p2 = self.pidrdr(serial)
|
self.p2 = self.pidrdr(serial)
|
||||||
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user