mirror of
https://github.com/noDRM/DeDRM_tools.git
synced 2026-03-24 22:48:56 +00:00
Compare commits
15 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
07e532f59c | ||
|
|
882edb6c69 | ||
|
|
93f02c625a | ||
|
|
e95ed1a8ed | ||
|
|
ba5927a20d | ||
|
|
297a9ddc66 | ||
|
|
4f34a9a196 | ||
|
|
529dd3f160 | ||
|
|
4163d5ccf4 | ||
|
|
867ac35b45 | ||
|
|
427137b0fe | ||
|
|
ac9cdb1e98 | ||
|
|
2bedd75005 | ||
|
|
8b632e309f | ||
|
|
bc968f8eca |
161
Calibre_Plugins/K4MobiDeDRM_plugin/__init__.py
Normal file
161
Calibre_Plugins/K4MobiDeDRM_plugin/__init__.py
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.gui2 import is_ok_to_use_qt
|
||||||
|
from calibre.utils.config import config_dir
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
# from calibre.ptempfile import PersistentTemporaryDirectory
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from zipfile import ZipFile
|
||||||
|
|
||||||
|
class K4DeDRM(FileTypePlugin):
|
||||||
|
name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
|
||||||
|
description = 'Removes DRM from Mobipocket, Kindle/Mobi, Kindle/Topaz and Kindle/Print Replica files. Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
||||||
|
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
||||||
|
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
||||||
|
version = (0, 4, 1) # The version number of this plugin
|
||||||
|
file_types = set(['prc','mobi','azw','azw1','azw4','tpz']) # The file types that this plugin will be applied to
|
||||||
|
on_import = True # Run this plugin during the import
|
||||||
|
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
||||||
|
minimum_calibre_version = (0, 7, 55)
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
"""
|
||||||
|
Dynamic modules can't be imported/loaded from a zipfile... so this routine
|
||||||
|
runs whenever the plugin gets initialized. This will extract the appropriate
|
||||||
|
library for the target OS and copy it to the 'alfcrypto' subdirectory of
|
||||||
|
calibre's configuration directory. That 'alfcrypto' directory is then
|
||||||
|
inserted into the syspath (as the very first entry) in the run function
|
||||||
|
so the CDLL stuff will work in the alfcrypto.py script.
|
||||||
|
"""
|
||||||
|
if iswindows:
|
||||||
|
names = ['alfcrypto.dll','alfcrypto64.dll']
|
||||||
|
elif isosx:
|
||||||
|
names = ['libalfcrypto.dylib']
|
||||||
|
else:
|
||||||
|
names = ['libalfcrypto32.so','libalfcrypto64.so']
|
||||||
|
lib_dict = self.load_resources(names)
|
||||||
|
self.alfdir = os.path.join(config_dir, 'alfcrypto')
|
||||||
|
if not os.path.exists(self.alfdir):
|
||||||
|
os.mkdir(self.alfdir)
|
||||||
|
for entry, data in lib_dict.items():
|
||||||
|
file_path = os.path.join(self.alfdir, entry)
|
||||||
|
with open(file_path,'wb') as f:
|
||||||
|
f.write(data)
|
||||||
|
|
||||||
|
def run(self, path_to_ebook):
|
||||||
|
# add the alfcrypto directory to sys.path so alfcrypto.py
|
||||||
|
# will be able to locate the custom lib(s) for CDLL import.
|
||||||
|
sys.path.insert(0, self.alfdir)
|
||||||
|
# Had to move these imports here so the custom libs can be
|
||||||
|
# extracted to the appropriate places beforehand these routines
|
||||||
|
# look for them.
|
||||||
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
|
|
||||||
|
plug_ver = '.'.join(str(self.version).strip('()').replace(' ', '').split(','))
|
||||||
|
k4 = True
|
||||||
|
if sys.platform.startswith('linux'):
|
||||||
|
k4 = False
|
||||||
|
pids = []
|
||||||
|
serials = []
|
||||||
|
kInfoFiles = []
|
||||||
|
# Get supplied list of PIDs to try from plugin customization.
|
||||||
|
customvalues = self.site_customization.split(',')
|
||||||
|
for customvalue in customvalues:
|
||||||
|
customvalue = str(customvalue)
|
||||||
|
customvalue = customvalue.strip()
|
||||||
|
if len(customvalue) == 10 or len(customvalue) == 8:
|
||||||
|
pids.append(customvalue)
|
||||||
|
else :
|
||||||
|
if len(customvalue) == 16 and customvalue[0] == 'B':
|
||||||
|
serials.append(customvalue)
|
||||||
|
else:
|
||||||
|
print "%s is not a valid Kindle serial number or PID." % str(customvalue)
|
||||||
|
|
||||||
|
# Load any kindle info files (*.info) included Calibre's config directory.
|
||||||
|
try:
|
||||||
|
# Find Calibre's configuration directory.
|
||||||
|
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
||||||
|
print 'K4MobiDeDRM v%s: Calibre configuration directory = %s' % (plug_ver, confpath)
|
||||||
|
files = os.listdir(confpath)
|
||||||
|
filefilter = re.compile("\.info$|\.kinf$", re.IGNORECASE)
|
||||||
|
files = filter(filefilter.search, files)
|
||||||
|
if files:
|
||||||
|
for filename in files:
|
||||||
|
fpath = os.path.join(confpath, filename)
|
||||||
|
kInfoFiles.append(fpath)
|
||||||
|
print 'K4MobiDeDRM v%s: Kindle info/kinf file %s found in config folder.' % (plug_ver, filename)
|
||||||
|
except IOError:
|
||||||
|
print 'K4MobiDeDRM v%s: Error reading kindle info/kinf files from config directory.' % plug_ver
|
||||||
|
pass
|
||||||
|
|
||||||
|
mobi = True
|
||||||
|
magic3 = file(path_to_ebook,'rb').read(3)
|
||||||
|
if magic3 == 'TPZ':
|
||||||
|
mobi = False
|
||||||
|
|
||||||
|
bookname = os.path.splitext(os.path.basename(path_to_ebook))[0]
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
mb = mobidedrm.MobiBook(path_to_ebook)
|
||||||
|
else:
|
||||||
|
mb = topazextract.TopazBook(path_to_ebook)
|
||||||
|
|
||||||
|
title = mb.getBookTitle()
|
||||||
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
|
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
|
except mobidedrm.DrmException, e:
|
||||||
|
#if you reached here then no luck raise and exception
|
||||||
|
if is_ok_to_use_qt():
|
||||||
|
from PyQt4.Qt import QMessageBox
|
||||||
|
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
|
||||||
|
d.show()
|
||||||
|
d.raise_()
|
||||||
|
d.exec_()
|
||||||
|
raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
|
||||||
|
except topazextract.TpzDRMError, e:
|
||||||
|
#if you reached here then no luck raise and exception
|
||||||
|
if is_ok_to_use_qt():
|
||||||
|
from PyQt4.Qt import QMessageBox
|
||||||
|
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
|
||||||
|
d.show()
|
||||||
|
d.raise_()
|
||||||
|
d.exec_()
|
||||||
|
raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
|
||||||
|
|
||||||
|
print "Success!"
|
||||||
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
of = self.temporary_file(bookname+'.azw4')
|
||||||
|
print 'K4MobiDeDRM v%s: Print Replica format detected.' % plug_ver
|
||||||
|
else:
|
||||||
|
of = self.temporary_file(bookname+'.mobi')
|
||||||
|
mb.getMobiFile(of.name)
|
||||||
|
else:
|
||||||
|
of = self.temporary_file(bookname+'.htmlz')
|
||||||
|
mb.getHTMLZip(of.name)
|
||||||
|
mb.cleanup()
|
||||||
|
return of.name
|
||||||
|
|
||||||
|
def customization_help(self, gui=False):
|
||||||
|
return 'Enter 10 character PIDs and/or Kindle serial numbers, use a comma (no spaces) to separate each PID or SerialNumber from the next.'
|
||||||
|
|
||||||
|
def load_resources(self, names):
|
||||||
|
ans = {}
|
||||||
|
with ZipFile(self.plugin_path, 'r') as zf:
|
||||||
|
for candidate in zf.namelist():
|
||||||
|
if candidate in names:
|
||||||
|
ans[candidate] = zf.read(candidate)
|
||||||
|
return ans
|
||||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -1,151 +1,290 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/env python
|
||||||
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
|
||||||
|
|
||||||
import sys
|
import sys, os
|
||||||
import csv
|
import hmac
|
||||||
import os
|
|
||||||
import getopt
|
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
# interface to needed routines libalfcrypto
|
||||||
def __init__(self, gd, flatxml):
|
def _load_libalfcrypto():
|
||||||
self.gd = gd
|
import ctypes
|
||||||
self.flatdoc = flatxml.split('\n')
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
self.temp = []
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
|
||||||
self.pw = foo[0]
|
|
||||||
self.gx = self.getData('info.glyph.x')
|
|
||||||
self.gy = self.getData('info.glyph.y')
|
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
|
||||||
def getData(self, path):
|
|
||||||
result = None
|
|
||||||
cnt = len(self.flatdoc)
|
|
||||||
for j in xrange(cnt):
|
|
||||||
item = self.flatdoc[j]
|
|
||||||
if item.find('=') >= 0:
|
|
||||||
(name, argt) = item.split('=')
|
|
||||||
argres = argt.split('|')
|
|
||||||
else:
|
|
||||||
name = item
|
|
||||||
argres = []
|
|
||||||
if (name.endswith(path)):
|
|
||||||
result = argres
|
|
||||||
break
|
|
||||||
if (len(argres) > 0) :
|
|
||||||
for j in xrange(0,len(argres)):
|
|
||||||
argres[j] = int(argres[j])
|
|
||||||
return result
|
|
||||||
def getDataTemp(self, path):
|
|
||||||
result = None
|
|
||||||
cnt = len(self.temp)
|
|
||||||
for j in xrange(cnt):
|
|
||||||
item = self.temp[j]
|
|
||||||
if item.find('=') >= 0:
|
|
||||||
(name, argt) = item.split('=')
|
|
||||||
argres = argt.split('|')
|
|
||||||
else:
|
|
||||||
name = item
|
|
||||||
argres = []
|
|
||||||
if (name.endswith(path)):
|
|
||||||
result = argres
|
|
||||||
self.temp.pop(j)
|
|
||||||
break
|
|
||||||
if (len(argres) > 0) :
|
|
||||||
for j in xrange(0,len(argres)):
|
|
||||||
argres[j] = int(argres[j])
|
|
||||||
return result
|
|
||||||
def getImages(self):
|
|
||||||
result = []
|
|
||||||
self.temp = self.flatdoc
|
|
||||||
while (self.getDataTemp('img') != None):
|
|
||||||
h = self.getDataTemp('img.h')[0]
|
|
||||||
w = self.getDataTemp('img.w')[0]
|
|
||||||
x = self.getDataTemp('img.x')[0]
|
|
||||||
y = self.getDataTemp('img.y')[0]
|
|
||||||
src = self.getDataTemp('img.src')[0]
|
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
|
||||||
return result
|
|
||||||
def getGlyphs(self):
|
|
||||||
result = []
|
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
|
||||||
glyphs = []
|
|
||||||
for j in set(self.gid):
|
|
||||||
glyphs.append(j)
|
|
||||||
glyphs.sort()
|
|
||||||
for gid in glyphs:
|
|
||||||
id='id="gl%d"' % gid
|
|
||||||
path = self.gd.lookup(id)
|
|
||||||
if path:
|
|
||||||
result.append(id + ' ' + path)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
pointer_size = ctypes.sizeof(ctypes.c_voidp)
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
name_of_lib = None
|
||||||
ml = ''
|
if sys.platform.startswith('darwin'):
|
||||||
pp = PParser(gdict, flat_xml)
|
name_of_lib = 'libalfcrypto.dylib'
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
elif sys.platform.startswith('win'):
|
||||||
if (raw):
|
if pointer_size == 4:
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
name_of_lib = 'alfcrypto.dll'
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
|
||||||
else:
|
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
|
||||||
ml += '<script><![CDATA[\n'
|
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
|
||||||
if (counter) :
|
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
|
||||||
if (counter < numfiles-1) :
|
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
|
||||||
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
|
|
||||||
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
|
|
||||||
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
|
|
||||||
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
|
|
||||||
ml += 'window.onload=setsize;\n'
|
|
||||||
ml += ']]></script>\n'
|
|
||||||
ml += '</head>\n'
|
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
|
||||||
if (counter == 0) :
|
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
|
||||||
else:
|
else:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
name_of_lib = 'alfcrypto64.dll'
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
else:
|
||||||
if (pp.gid != None):
|
if pointer_size == 4:
|
||||||
ml += '<defs>\n'
|
name_of_lib = 'libalfcrypto32.so'
|
||||||
gdefs = pp.getGlyphs()
|
else:
|
||||||
for j in xrange(0,len(gdefs)):
|
name_of_lib = 'libalfcrypto64.so'
|
||||||
ml += gdefs[j]
|
|
||||||
ml += '</defs>\n'
|
libalfcrypto = sys.path[0] + os.sep + name_of_lib
|
||||||
img = pp.getImages()
|
|
||||||
if (img != None):
|
if not os.path.isfile(libalfcrypto):
|
||||||
for j in xrange(0,len(img)):
|
raise Exception('libalfcrypto not found')
|
||||||
ml += img[j]
|
|
||||||
if (pp.gid != None):
|
libalfcrypto = CDLL(libalfcrypto)
|
||||||
for j in xrange(0,len(pp.gid)):
|
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
c_char_pp = POINTER(c_char_p)
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
c_int_p = POINTER(c_int)
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
|
||||||
if (raw) :
|
|
||||||
ml += '</svg>'
|
def F(restype, name, argtypes):
|
||||||
else :
|
func = getattr(libalfcrypto, name)
|
||||||
ml += '</svg></a>\n'
|
func.restype = restype
|
||||||
if (counter == numfiles - 1) :
|
func.argtypes = argtypes
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
return func
|
||||||
else :
|
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
# aes cbc decryption
|
||||||
ml += '</div>\n'
|
#
|
||||||
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
|
# struct aes_key_st {
|
||||||
ml += '</body>\n'
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
ml += '</html>\n'
|
# int rounds;
|
||||||
return ml
|
# };
|
||||||
|
#
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key,
|
||||||
|
# unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pukall 1 Cipher
|
||||||
|
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
|
||||||
|
# unsigned char *dest, unsigned int len, int decryption);
|
||||||
|
|
||||||
|
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
|
||||||
|
|
||||||
|
# Topaz Encryption
|
||||||
|
# typedef struct _TpzCtx {
|
||||||
|
# unsigned int v[2];
|
||||||
|
# } TpzCtx;
|
||||||
|
#
|
||||||
|
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
|
||||||
|
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
|
||||||
|
|
||||||
|
class TPZ_CTX(Structure):
|
||||||
|
_fields_ = [('v', c_long * 2)]
|
||||||
|
|
||||||
|
TPZ_CTX_p = POINTER(TPZ_CTX)
|
||||||
|
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
|
||||||
|
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
|
||||||
|
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise Exception('AES CBC improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise Exception('Failed to initialize AES CBC key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise Exception('AES CBC decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
self.key = key
|
||||||
|
out = create_string_buffer(len(src))
|
||||||
|
de = 0
|
||||||
|
if decryption:
|
||||||
|
de = 1
|
||||||
|
rv = PC1(key, len(key), src, out, len(src), de)
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
tpz_ctx = self._ctx = TPZ_CTX()
|
||||||
|
topazCryptoInit(tpz_ctx, key, len(key))
|
||||||
|
return tpz_ctx
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
topazCryptoDecrypt(ctx, data, out, len(data))
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
print "Using Library AlfCrypto DLL/DYLIB/SO"
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
|
||||||
|
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
sum1 = 0;
|
||||||
|
sum2 = 0;
|
||||||
|
keyXorVal = 0;
|
||||||
|
if len(key)!=16:
|
||||||
|
print "Bad key length!"
|
||||||
|
return None
|
||||||
|
wkey = []
|
||||||
|
for i in xrange(8):
|
||||||
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
dst = ""
|
||||||
|
for i in xrange(len(src)):
|
||||||
|
temp1 = 0;
|
||||||
|
byteXorVal = 0;
|
||||||
|
for j in xrange(8):
|
||||||
|
temp1 ^= wkey[j]
|
||||||
|
sum2 = (sum2+j)*20021 + sum1
|
||||||
|
sum1 = (temp1*346)&0xFFFF
|
||||||
|
sum2 = (sum2+sum1)&0xFFFF
|
||||||
|
temp1 = (temp1*20021+1)&0xFFFF
|
||||||
|
byteXorVal ^= temp1 ^ sum2
|
||||||
|
curByte = ord(src[i])
|
||||||
|
if not decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
|
if decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
for j in xrange(8):
|
||||||
|
wkey[j] ^= keyXorVal;
|
||||||
|
dst+=chr(curByte)
|
||||||
|
return dst
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
self._ctx = [ctx1, ctx2]
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._key = None
|
||||||
|
self._iv = None
|
||||||
|
self.aes = None
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._key = userkey
|
||||||
|
self._iv = iv
|
||||||
|
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
iv = self._iv
|
||||||
|
cleartext = self.aes.decrypt(iv + data)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
|
||||||
|
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
|
try:
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
|
||||||
|
break
|
||||||
|
except (ImportError, Exception):
|
||||||
|
pass
|
||||||
|
return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
|
||||||
|
# this only exists in openssl so we will use pure python implementation instead
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
def pbkdf2(self, passwd, salt, iter, keylen):
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise Exception("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
return T
|
||||||
|
|
||||||
|
sha = hashlib.sha1
|
||||||
|
digest_size = sha().digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
h = hmac.new( passwd, None, sha )
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, iter, i )
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/k4mobidedrm_orig.py
Normal file
BIN
Calibre_Plugins/K4MobiDeDRM_plugin/k4mobidedrm_orig.py
Normal file
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,726 @@
|
|||||||
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
|
import subprocess
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines in openssl's libcrypto
|
||||||
|
def _load_crypto_libcrypto():
|
||||||
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast
|
||||||
|
from ctypes.util import find_library
|
||||||
|
|
||||||
|
libcrypto = find_library('crypto')
|
||||||
|
if libcrypto is None:
|
||||||
|
raise DrmException('libcrypto not found')
|
||||||
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
# From OpenSSL's crypto aes header
|
||||||
|
#
|
||||||
|
# AES_ENCRYPT 1
|
||||||
|
# AES_DECRYPT 0
|
||||||
|
# AES_MAXNR 14 (in bytes)
|
||||||
|
# AES_BLOCK_SIZE 16 (in bytes)
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
# note: the ivec string, and output buffer are both mutable
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
|
||||||
|
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
# From OpenSSL's Crypto evp/p5_crpt2.c
|
||||||
|
#
|
||||||
|
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
|
||||||
|
# const unsigned char *salt, int saltlen, int iter,
|
||||||
|
# int keylen, unsigned char *out);
|
||||||
|
|
||||||
|
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
|
||||||
|
class LibCrypto(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise DrmException('AES improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
self._userkey = userkey
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
keyctx = self._keyctx
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise DrmException('AES decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
|
saltlen = len(salt)
|
||||||
|
passlen = len(passwd)
|
||||||
|
out = create_string_buffer(keylen)
|
||||||
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
|
return out.raw
|
||||||
|
return LibCrypto
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
LibCrypto = None
|
||||||
|
try:
|
||||||
|
LibCrypto = _load_crypto_libcrypto()
|
||||||
|
except (ImportError, DrmException):
|
||||||
|
pass
|
||||||
|
return LibCrypto
|
||||||
|
|
||||||
|
LibCrypto = _load_crypto()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Utility Routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map):
    """Return the MD5 digest of data, obfuscated via encode() over map."""
    return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
def decode(data,map):
    """Invert encode(): turn consecutive character pairs back into bytes.

    Stops silently at the first character that is not present in map.
    NOTE(review): this relies on Python 2 semantics -- struct.pack()
    returns a str that is concatenated onto result; under Python 3 the
    str += bytes would raise TypeError.
    """
    result = ""
    for i in range (0,len(data)-1,2):
        high = map.find(data[i])
        low = map.find(data[i+1])
        if (high == -1) or (low == -1) :
            break
        # recombine the two digits and undo the ^0x80 applied by encode()
        value = (((high * len(map)) ^ 0x80) & 0xFF) + low
        result += pack("B",value)
    return result
|
||||||
|
|
||||||
|
# For K4M 1.6.X and later
# generate table of prime number less than or equal to int n
def primes(n):
    """Return the list of all primes <= n using an odd-only sieve.

    Fixed to behave identically under Python 2 and Python 3: the
    original used '/' (float division on py3) and assigned into a
    range() object (immutable on py3).  Results are unchanged.
    """
    if n == 2:
        return [2]
    elif n < 2:
        return []
    # sieve over the odd numbers 3..n; a zeroed slot marks a composite
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            j = (m * m - 3) // 2      # index of m*m within s
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i = i + 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
# returns with the serial number of drive whose BSD Name is "disk0"
def GetVolumeSerialNumber():
    """Return the serial number of the boot drive (BSD name "disk0").

    Honors the MYSERIALNUMBER environment variable as an override.
    Falls back to parsing `ioreg` output and returns '' when no
    matching drive is found.
    """
    sernum = os.getenv('MYSERIALNUMBER')
    if sernum != None:
        return sernum
    cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    reslst = out1.split('\n')
    cnt = len(reslst)
    bsdname = None
    sernum = None
    foundIt = False
    for j in xrange(cnt):
        resline = reslst[j]
        # remember the most recent serial number seen ...
        pp = resline.find('"Serial Number" = "')
        if pp >= 0:
            sernum = resline[pp+19:-1]
            sernum = sernum.strip()
        # ... and accept it once the matching BSD Name line is disk0
        bb = resline.find('"BSD Name" = "')
        if bb >= 0:
            bsdname = resline[bb+14:-1]
            bsdname = bsdname.strip()
            if (bsdname == 'disk0') and (sernum != None):
                foundIt = True
                break
    if not foundIt:
        sernum = ''
    return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
    """Return the /dev partition name (e.g. 'disk0s2') whose mount point
    contains ~/Library/Application Support/Kindle, or '' if none match.

    Parses `/sbin/mount` output; the last matching mount line wins (no
    longest-prefix handling).
    """
    home = os.getenv('HOME')
    dpath = home + '/Library/Application Support/Kindle'
    cmdline = '/sbin/mount'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    reslst = out1.split('\n')
    cnt = len(reslst)
    disk = ''
    foundIt = False
    for j in xrange(cnt):
        resline = reslst[j]
        if resline.startswith('/dev'):
            # mount lines look like: /dev/disk0s2 on / (hfs, local, journaled)
            (devpart, mpath) = resline.split(' on ')
            dpart = devpart[5:]          # strip the leading '/dev/'
            pp = mpath.find('(')
            if pp >= 0:
                mpath = mpath[:pp-1]     # drop the trailing '(options...)'
            if dpath.startswith(mpath):
                disk = dpart
    return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
def GetDiskPartitionUUID(diskpart):
    """Return the UUID of the partition whose BSD name equals diskpart.

    Honors the MYUUIDNUMBER environment variable as an override.
    Returns '' when no UUID paired with diskpart is found.
    """
    uuidnum = os.getenv('MYUUIDNUMBER')
    if uuidnum != None:
        return uuidnum
    cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    reslst = out1.split('\n')
    cnt = len(reslst)
    bsdname = None
    uuidnum = None
    foundIt = False
    # track brace nesting so a "UUID" line is only accepted when it sits
    # at the same nesting level as the matching "BSD Name" line, i.e.
    # both belong to the same ioreg dictionary
    nest = 0
    uuidnest = -1
    partnest = -2
    for j in xrange(cnt):
        resline = reslst[j]
        if resline.find('{') >= 0:
            nest += 1
        if resline.find('}') >= 0:
            nest -= 1
        pp = resline.find('"UUID" = "')
        if pp >= 0:
            uuidnum = resline[pp+10:-1]
            uuidnum = uuidnum.strip()
            uuidnest = nest
            if partnest == uuidnest and uuidnest > 0:
                foundIt = True
                break
        bb = resline.find('"BSD Name" = "')
        if bb >= 0:
            bsdname = resline[bb+14:-1]
            bsdname = bsdname.strip()
            if (bsdname == diskpart):
                partnest = nest
            else :
                partnest = -2
            if partnest == uuidnest and partnest > 0:
                foundIt = True
                break
        if nest == 0:
            # left the current device's dictionary; reset the pairing state
            partnest = -2
            uuidnest = -1
            uuidnum = None
            bsdname = None
    if not foundIt:
        uuidnum = ''
    return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
    """Return the en0 MAC address munged the way the Kindle app does.

    Honors the MYMACNUM environment variable as an override.  Otherwise
    parses `/sbin/ifconfig en0`, xors every octet with 0xa5 and swaps
    octets 3 and 4, returning 12 lowercase hex digits, or '' on failure.
    """
    macnum = os.getenv('MYMACNUM')
    if macnum != None:
        return macnum
    cmdline = '/sbin/ifconfig en0'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    reslst = out1.split('\n')
    cnt = len(reslst)
    macnum = None
    foundIt = False
    for j in xrange(cnt):
        resline = reslst[j]
        pp = resline.find('ether ')
        if pp >= 0:
            macnum = resline[pp+6:-1]
            macnum = macnum.strip()
            # now munge it up the way Kindle app does
            # by xoring it with 0xa5 and swapping elements 3 and 4
            maclst = macnum.split(':')
            n = len(maclst)
            if n != 6:
                # BUGFIX: was misspelled 'fountIt', silently creating a
                # dead local instead of clearing the flag
                foundIt = False
                break
            for i in range(6):
                maclst[i] = int('0x' + maclst[i], 0)
            mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
            mlst[5] = maclst[5] ^ 0xa5
            mlst[4] = maclst[3] ^ 0xa5
            mlst[3] = maclst[4] ^ 0xa5
            mlst[2] = maclst[2] ^ 0xa5
            mlst[1] = maclst[1] ^ 0xa5
            mlst[0] = maclst[0] ^ 0xa5
            macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
            foundIt = True
            break
    if not foundIt:
        macnum = ''
    return macnum
|
||||||
|
|
||||||
|
|
||||||
|
# uses unix env to get username instead of using sysctlbyname
def GetUserName():
    """Return the current login name from the USER environment variable."""
    return os.getenv('USER')
|
||||||
|
|
||||||
|
def isNewInstall():
    """Report whether the K4Mac >= 1.6 marker file exists under HOME."""
    home = os.getenv('HOME')
    # soccer game fan anyone
    marker = home + '/Library/Application Support/Kindle/storage/.pes2011'
    # print marker, os.path.exists(marker)
    return os.path.exists(marker)
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
    """Return the machine identifier string K4Mac uses for its crypto.

    Tries, in order: munged MAC address (only on fresh >= 1.6 installs),
    the boot volume serial number, the UUID of the partition holding the
    Kindle support dir, then the munged MAC again, finally falling back
    to the constant '9999999999'.
    """
    # K4Mac now has an extensive set of ids strings it uses
    # in encoding pids and in creating unique passwords
    # for use in its own version of CryptUnprotectDataV2

    # BUT Amazon has now become nasty enough to detect when its app
    # is being run under a debugger and actually changes code paths
    # including which one of these strings is chosen, all to try
    # to prevent reverse engineering

    if isNewInstall():
        mungedmac = GetMACAddressMunged()
        if len(mungedmac) > 7:
            return mungedmac
    sernum = GetVolumeSerialNumber()
    if len(sernum) > 7:
        return sernum
    diskpart = GetUserHomeAppSupKindleDirParitionName()
    uuidnum = GetDiskPartitionUUID(diskpart)
    if len(uuidnum) > 7:
        return uuidnum
    mungedmac = GetMACAddressMunged()
    if len(mungedmac) > 7:
        return mungedmac
    return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
    """AES decryptor keyed from the volume serial number and user name,
    mimicking Windows CryptUnprotectData for K4Mac < 1.6.0."""
    def __init__(self):
        sernum = GetVolumeSerialNumber()
        if sernum == '':
            sernum = '9999999999'
        sp = sernum + '!@#' + GetUserName()
        passwdData = encode(SHA256(sp),charMap1)
        salt = '16743'
        self.crp = LibCrypto()
        iter = 0x3e8
        keylen = 0x80
        # PBKDF2 key material: first 32 bytes are the AES key, next 16 the IV
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """Decrypt one protected blob and de-obfuscate it via charMap1."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext,charMap1)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0
class CryptUnprotectDataV2(object):
    """AES decryptor keyed from the user name and machine ID string,
    mimicking Windows CryptUnprotectData for K4Mac 1.6.x - 1.8.x."""
    def __init__(self):
        sp = GetUserName() + ':&%:' + GetIDString()
        passwdData = encode(SHA256(sp),charMap5)
        # salt generation as per the code
        salt = 0x0512981d * 2 * 1 * 1
        salt = str(salt) + GetUserName()
        salt = encode(salt,charMap5)
        self.crp = LibCrypto()
        iter = 0x800
        keylen = 0x400
        # PBKDF2 key material: first 32 bytes are the AES key, next 16 the IV
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """Decrypt one protected blob and de-obfuscate it via charMap5."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext, charMap5)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
    """Decrypt the fixed-password header blob of a .kinf2011 file.

    Uses constant password/salt values, so no machine-specific state
    is needed to recover the header.
    """
    passwdData = 'header_key_data'
    salt = 'HEADER.2011'
    iter = 0x80
    keylen = 0x100
    crp = LibCrypto()
    # PBKDF2 key material: first 32 bytes are the AES key, next 16 the IV
    key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
    key = key_iv[0:32]
    iv = key_iv[32:48]
    crp.set_decrypt_key(key,iv)
    cleartext = crp.decrypt(encryptedData)
    return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
    """AES decryptor keyed from user name, machine ID, and a per-file
    entropy string, mimicking CryptUnprotectData for K4Mac >= 1.9.0."""
    def __init__(self, entropy):
        sp = GetUserName() + '+@#$%+' + GetIDString()
        passwdData = encode(SHA256(sp),charMap2)
        # the entropy extracted from the .kinf2011 header is the salt
        salt = entropy
        self.crp = LibCrypto()
        iter = 0x800
        keylen = 0x400
        # PBKDF2 key material: first 32 bytes are the AES key, next 16 the IV
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """Decrypt one protected blob and de-obfuscate it via charMap2."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext, charMap2)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# Locate the .kindle-info files
def getKindleInfoFiles(kInfoFiles):
    """Append every Kindle info file found under ~/Library/Application Support.

    Searches, in order, for the legacy .kindle-info files, the
    .rainier*-kinf files (K4Mac 1.6.x+), and the current .kinf2011 files,
    appending each existing path to kInfoFiles.  Prints a notice when
    nothing at all is found.  Returns the (mutated) kInfoFiles list.

    The original repeated the identical find/parse stanza three times;
    it is now a single loop over the filename patterns — behavior and
    search order are unchanged.
    """
    home = os.getenv('HOME')
    found = False
    for pattern in ('.kindle-info', '.rainier*-kinf', '.kinf2011'):
        cmdline = 'find "' + home + '/Library/Application Support" -name "' + pattern + '"'
        cmdline = cmdline.encode(sys.getfilesystemencoding())
        p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
        out1, out2 = p1.communicate()
        for resline in out1.split('\n'):
            if os.path.isfile(resline):
                kInfoFiles.append(resline)
                found = True
    if not found:
        print('No kindle-info files have been found.')
    return kInfoFiles
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
# database of keynames and values
def getDBfromFile(kInfoFile):
    """Parse any of the three K4Mac info-file formats into a dict.

    Format is detected from the file contents:
      * contains '['      -> original .kindle-info (K4Mac < 1.6.0)
      * first char is '/' -> .kinf records (K4Mac 1.6.x - 1.8.x)
      * anything else     -> .kinf2011 (K4Mac >= 1.9.0)
    Returns {keyname: cleartext} or None when nothing could be decoded.
    """
    # well-known key names; records are matched by hashed name
    names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
    DB = {}
    cnt = 0
    infoReader = open(kInfoFile, 'r')
    hdr = infoReader.read(1)
    data = infoReader.read()

    if data.find('[') != -1 :

        # older style kindle-info file
        cud = CryptUnprotectData()
        items = data.split('[')
        for item in items:
            if item != '':
                keyhash, rawdata = item.split(':')
                keyname = "unknown"
                for name in names:
                    if encodeHash(name,charMap2) == keyhash:
                        keyname = name
                        break
                if keyname == "unknown":
                    keyname = keyhash
                encryptedValue = decode(rawdata,charMap2)
                cleartext = cud.decrypt(encryptedValue)
                DB[keyname] = cleartext
                cnt = cnt + 1
        if cnt == 0:
            DB = None
        return DB

    if hdr == '/':

        # else newer style .kinf file used by K4Mac >= 1.6.0
        # the .kinf file uses "/" to separate it into records
        # so remove the trailing "/" to make it easy to use split
        data = data[:-1]
        items = data.split('/')
        cud = CryptUnprotectDataV2()

        # loop through the item records until all are processed
        while len(items) > 0:

            # get the first item record
            item = items.pop(0)

            # the first 32 chars of the first record of a group
            # is the MD5 hash of the key name encoded by charMap5
            keyhash = item[0:32]
            keyname = "unknown"

            # the raw keyhash string is also used to create entropy for the actual
            # CryptProtectData Blob that represents that keys contents
            # "entropy" not used for K4Mac only K4PC
            # entropy = SHA1(keyhash)

            # the remainder of the first record when decoded with charMap5
            # has the ':' split char followed by the string representation
            # of the number of records that follow
            # and make up the contents
            srcnt = decode(item[34:],charMap5)
            rcnt = int(srcnt)

            # read and store in rcnt records of data
            # that make up the contents value
            edlst = []
            for i in xrange(rcnt):
                item = items.pop(0)
                edlst.append(item)

            keyname = "unknown"
            for name in names:
                if encodeHash(name,charMap5) == keyhash:
                    keyname = name
                    break
            if keyname == "unknown":
                keyname = keyhash

            # the charMap5 encoded contents data has had a length
            # of chars (always odd) cut off of the front and moved
            # to the end to prevent decoding using charMap5 from
            # working properly, and thereby preventing the ensuing
            # CryptUnprotectData call from succeeding.

            # The offset into the charMap5 encoded contents seems to be:
            # len(contents) - largest prime number less than or equal to int(len(content)/3)
            # (in other words split "about" 2/3rds of the way through)

            # move first offsets chars to end to align for decode by charMap5
            encdata = "".join(edlst)
            contlen = len(encdata)

            # now properly split and recombine
            # by moving noffset chars from the start of the
            # string to the end of the string
            noffset = contlen - primes(int(contlen/3))[-1]
            pfx = encdata[0:noffset]
            encdata = encdata[noffset:]
            encdata = encdata + pfx

            # decode using charMap5 to get the CryptProtect Data
            encryptedValue = decode(encdata,charMap5)
            cleartext = cud.decrypt(encryptedValue)
            DB[keyname] = cleartext
            cnt = cnt + 1

        if cnt == 0:
            DB = None
        return DB

    # the latest .kinf2011 version for K4M 1.9.1
    # put back the hdr char, it is needed
    data = hdr + data
    data = data[:-1]
    items = data.split('/')

    # the headerblob is the encrypted information needed to build the entropy string
    headerblob = items.pop(0)
    encryptedValue = decode(headerblob, charMap1)
    cleartext = UnprotectHeaderData(encryptedValue)

    # now extract the pieces in the same way
    # this version is different from K4PC it scales the build number by multipying by 735
    pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
    for m in re.finditer(pattern, cleartext):
        entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
    # NOTE(review): if the header pattern never matches, 'entropy' is
    # unbound here and the next line raises NameError -- confirm this
    # cannot happen for valid .kinf2011 files

    cud = CryptUnprotectDataV3(entropy)

    # loop through the item records until all are processed
    while len(items) > 0:

        # get the first item record
        item = items.pop(0)

        # the first 32 chars of the first record of a group
        # is the MD5 hash of the key name encoded by charMap5
        keyhash = item[0:32]
        keyname = "unknown"

        # unlike K4PC the keyhash is not used in generating entropy
        # entropy = SHA1(keyhash) + added_entropy
        # entropy = added_entropy

        # the remainder of the first record when decoded with charMap5
        # has the ':' split char followed by the string representation
        # of the number of records that follow
        # and make up the contents
        srcnt = decode(item[34:],charMap5)
        rcnt = int(srcnt)

        # read and store in rcnt records of data
        # that make up the contents value
        edlst = []
        for i in xrange(rcnt):
            item = items.pop(0)
            edlst.append(item)

        keyname = "unknown"
        for name in names:
            if encodeHash(name,testMap8) == keyhash:
                keyname = name
                break
        if keyname == "unknown":
            keyname = keyhash

        # the testMap8 encoded contents data has had a length
        # of chars (always odd) cut off of the front and moved
        # to the end to prevent decoding using testMap8 from
        # working properly, and thereby preventing the ensuing
        # CryptUnprotectData call from succeeding.

        # The offset into the testMap8 encoded contents seems to be:
        # len(contents) - largest prime number less than or equal to int(len(content)/3)
        # (in other words split "about" 2/3rds of the way through)

        # move first offsets chars to end to align for decode by testMap8
        encdata = "".join(edlst)
        contlen = len(encdata)

        # now properly split and recombine
        # by moving noffset chars from the start of the
        # string to the end of the string
        noffset = contlen - primes(int(contlen/3))[-1]
        pfx = encdata[0:noffset]
        encdata = encdata[noffset:]
        encdata = encdata + pfx

        # decode using testMap8 to get the CryptProtect Data
        encryptedValue = decode(encdata,testMap8)
        cleartext = cud.decrypt(encryptedValue)
        # print keyname
        # print cleartext
        DB[keyname] = cleartext
        cnt = cnt + 1

    if cnt == 0:
        DB = None
    return DB
|
||||||
@@ -1,50 +1,134 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
# For use with Topaz Scripts Version 2.6
|
|
||||||
|
class Unbuffered:
    """Stream proxy that flushes the wrapped stream after every write."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        out = self.stream
        out.write(data)
        out.flush()

    def __getattr__(self, attr):
        # delegate every other attribute to the wrapped stream
        return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
|
||||||
import csv
|
import csv
|
||||||
import sys
|
|
||||||
import os
|
import os
|
||||||
import getopt
|
import getopt
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
    """Raised when Topaz parsing or decryption fails."""
    pass
|
||||||
|
|
||||||
class DocParser(object):
|
# local support routines
|
||||||
def __init__(self, flatxml, fontsize, ph, pw):
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre :
|
||||||
|
from calibre_plugins.k4mobidedrm import convert2xml
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2html
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2svg
|
||||||
|
from calibre_plugins.k4mobidedrm import stylexml2css
|
||||||
|
else :
|
||||||
|
import convert2xml
|
||||||
|
import flatxml2html
|
||||||
|
import flatxml2svg
|
||||||
|
import stylexml2css
|
||||||
|
|
||||||
|
# global switch
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
|
# Get a 7 bit encoded number from a file
def readEncodedNumber(file):
    """Read one variable-length, 7-bits-per-byte integer from *file*.

    A leading 0xFF byte marks a negative value; while a byte has its
    high bit set, seven more bits follow in the next byte.  Returns
    None if the stream ends before the number is complete.
    """
    negative = False
    byte = file.read(1)
    if len(byte) == 0:
        return None
    data = ord(byte)
    if data == 0xFF:
        # negative-number marker; the magnitude follows
        negative = True
        byte = file.read(1)
        if len(byte) == 0:
            return None
        data = ord(byte)
    if data >= 0x80:
        # high bit set: accumulate 7 bits per byte until a byte < 0x80
        value = data & 0x7F
        while data >= 0x80:
            byte = file.read(1)
            if len(byte) == 0:
                return None
            data = ord(byte)
            value = (value << 7) + (data & 0x7F)
        data = value
    if negative:
        data = -data
    return data
|
||||||
|
|
||||||
|
# Build a length-prefixed string: the 7-bit-encoded length followed by data
def lengthPrefixString(data):
    """Return data prefixed with its encodeNumber()-encoded length."""
    return encodeNumber(len(data))+data
|
||||||
|
|
||||||
|
def readString(file):
    """Read one length-prefixed string from *file*.

    Returns None on EOF before the length, '' when the stream is
    shorter than the declared length, otherwise the raw string bytes.
    """
    stringLength = readEncodedNumber(file)
    if (stringLength == None):
        return None
    sv = file.read(stringLength)
    if (len(sv) != stringLength):
        return ""
    return unpack(str(stringLength)+"s",sv)[0]
|
||||||
|
|
||||||
|
def getMetaArray(metaFile):
    """Parse a Topaz metadata file into a {tag: value} dict.

    The file holds a 7-bit-encoded count followed by that many
    tag/value string pairs.  NOTE(review): uses the Python 2 file()
    builtin and xrange.
    """
    # parse the meta file
    result = {}
    fo = file(metaFile,'rb')
    size = readEncodedNumber(fo)
    for i in xrange(size):
        tag = readString(fo)
        value = readString(fo)
        result[tag] = value
        # print tag, value
    fo.close()
    return result
|
||||||
|
|
||||||
|
|
||||||
|
# dictionary of all text strings by index value
class Dictionary(object):
    """Topaz per-book string table.

    Eagerly reads every length-prefixed string from dictFile, XML-escapes
    it, and serves entries by index via lookup().

    Fixes applied: the XML entity replacement strings in escapestr() had
    degenerated into identity no-op replaces ('&' -> '&') -- the proper
    entities are restored; the py2-only print statement and file()
    builtin were replaced with their py2/py3-compatible equivalents.
    """

    def __init__(self, dictFile):
        self.filename = dictFile
        self.size = 0
        self.fo = open(dictFile,'rb')
        self.stable = []
        self.size = readEncodedNumber(self.fo)
        for i in range(self.size):
            self.stable.append(self.escapestr(readString(self.fo)))
        self.pos = 0

    def escapestr(self, str):
        # XML-escape special characters (note: parameter shadows builtin str)
        str = str.replace('&','&amp;')
        str = str.replace('<','&lt;')
        str = str.replace('>','&gt;')
        str = str.replace('=','&#61;')
        return str

    def lookup(self,val):
        """Return the string at index val, remembering it as the position."""
        if ((val >= 0) and (val < self.size)) :
            self.pos = val
            return self.stable[self.pos]
        else:
            print("Error - %d outside of string table limits" % val)
            raise TpzDRMError('outside or string table limits')
            # sys.exit(-1)

    def getSize(self):
        return self.size

    def getPos(self):
        return self.pos
|
||||||
|
|
||||||
|
|
||||||
|
class PageDimParser(object):
|
||||||
|
def __init__(self, flatxml):
|
||||||
self.flatdoc = flatxml.split('\n')
|
self.flatdoc = flatxml.split('\n')
|
||||||
self.fontsize = int(fontsize)
|
|
||||||
self.ph = int(ph) * 1.0
|
|
||||||
self.pw = int(pw) * 1.0
|
|
||||||
|
|
||||||
stags = {
|
|
||||||
'paragraph' : 'p',
|
|
||||||
'graphic' : '.graphic'
|
|
||||||
}
|
|
||||||
|
|
||||||
attr_val_map = {
|
|
||||||
'hang' : 'text-indent: ',
|
|
||||||
'indent' : 'text-indent: ',
|
|
||||||
'line-space' : 'line-height: ',
|
|
||||||
'margin-bottom' : 'margin-bottom: ',
|
|
||||||
'margin-left' : 'margin-left: ',
|
|
||||||
'margin-right' : 'margin-right: ',
|
|
||||||
'margin-top' : 'margin-top: ',
|
|
||||||
'space-after' : 'padding-bottom: ',
|
|
||||||
}
|
|
||||||
|
|
||||||
attr_str_map = {
|
|
||||||
'align-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
|
|
||||||
'align-left' : 'text-align: left;',
|
|
||||||
'align-right' : 'text-align: right;',
|
|
||||||
'align-justify' : 'text-align: justify;',
|
|
||||||
'display-inline' : 'display: inline;',
|
|
||||||
'pos-left' : 'text-align: left;',
|
|
||||||
'pos-right' : 'text-align: right;',
|
|
||||||
'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
# find tag if within pos to end inclusive
|
# find tag if within pos to end inclusive
|
||||||
def findinDoc(self, tagpath, pos, end) :
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
result = None
|
result = None
|
||||||
@@ -58,7 +142,7 @@ class DocParser(object):
|
|||||||
for j in xrange(pos, end):
|
for j in xrange(pos, end):
|
||||||
item = docList[j]
|
item = docList[j]
|
||||||
if item.find('=') >= 0:
|
if item.find('=') >= 0:
|
||||||
(name, argres) = item.split('=',1)
|
(name, argres) = item.split('=')
|
||||||
else :
|
else :
|
||||||
name = item
|
name = item
|
||||||
argres = ''
|
argres = ''
|
||||||
@@ -67,177 +151,559 @@ class DocParser(object):
|
|||||||
foundat = j
|
foundat = j
|
||||||
break
|
break
|
||||||
return foundat, result
|
return foundat, result
|
||||||
|
|
||||||
|
|
||||||
# return list of start positions for the tagpath
|
|
||||||
def posinDoc(self, tagpath):
|
|
||||||
startpos = []
|
|
||||||
pos = 0
|
|
||||||
res = ""
|
|
||||||
while res != None :
|
|
||||||
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
|
||||||
if res != None :
|
|
||||||
startpos.append(foundpos)
|
|
||||||
pos = foundpos + 1
|
|
||||||
return startpos
|
|
||||||
|
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
(pos, sph) = self.findinDoc('page.h',0,-1)
|
||||||
|
(pos, spw) = self.findinDoc('page.w',0,-1)
|
||||||
|
if (sph == None): sph = '-1'
|
||||||
|
if (spw == None): spw = '-1'
|
||||||
|
return sph, spw
|
||||||
|
|
||||||
classlst = ''
|
def getPageDim(flatxml):
|
||||||
csspage = '.cl-center { text-align: center; margin-left: auto; margin-right: auto; }\n'
|
|
||||||
csspage += '.cl-right { text-align: right; }\n'
|
|
||||||
csspage += '.cl-left { text-align: left; }\n'
|
|
||||||
csspage += '.cl-justify { text-align: justify; }\n'
|
|
||||||
|
|
||||||
# generate a list of each <style> starting point in the stylesheet
|
|
||||||
styleList= self.posinDoc('book.stylesheet.style')
|
|
||||||
stylecnt = len(styleList)
|
|
||||||
styleList.append(-1)
|
|
||||||
|
|
||||||
# process each style converting what you can
|
|
||||||
|
|
||||||
for j in xrange(stylecnt):
|
|
||||||
start = styleList[j]
|
|
||||||
end = styleList[j+1]
|
|
||||||
|
|
||||||
(pos, tag) = self.findinDoc('style._tag',start,end)
|
|
||||||
if tag == None :
|
|
||||||
(pos, tag) = self.findinDoc('style.type',start,end)
|
|
||||||
|
|
||||||
# Is this something we know how to convert to css
|
|
||||||
if tag in self.stags :
|
|
||||||
|
|
||||||
# get the style class
|
|
||||||
(pos, sclass) = self.findinDoc('style.class',start,end)
|
|
||||||
if sclass != None:
|
|
||||||
sclass = sclass.replace(' ','-')
|
|
||||||
sclass = '.cl-' + sclass.lower()
|
|
||||||
else :
|
|
||||||
sclass = ''
|
|
||||||
|
|
||||||
# check for any "after class" specifiers
|
|
||||||
(pos, aftclass) = self.findinDoc('style._after_class',start,end)
|
|
||||||
if aftclass != None:
|
|
||||||
aftclass = aftclass.replace(' ','-')
|
|
||||||
aftclass = '.cl-' + aftclass.lower()
|
|
||||||
else :
|
|
||||||
aftclass = ''
|
|
||||||
|
|
||||||
cssargs = {}
|
|
||||||
|
|
||||||
while True :
|
|
||||||
|
|
||||||
(pos1, attr) = self.findinDoc('style.rule.attr', start, end)
|
|
||||||
(pos2, val) = self.findinDoc('style.rule.value', start, end)
|
|
||||||
|
|
||||||
if attr == None : break
|
|
||||||
|
|
||||||
if (attr == 'display') or (attr == 'pos') or (attr == 'align'):
|
|
||||||
# handle text based attributess
|
|
||||||
attr = attr + '-' + val
|
|
||||||
if attr in self.attr_str_map :
|
|
||||||
cssargs[attr] = (self.attr_str_map[attr], '')
|
|
||||||
else :
|
|
||||||
# handle value based attributes
|
|
||||||
if attr in self.attr_val_map :
|
|
||||||
name = self.attr_val_map[attr]
|
|
||||||
if attr in ('margin-bottom', 'margin-top', 'space-after') :
|
|
||||||
scale = self.ph
|
|
||||||
elif attr in ('margin-right', 'indent', 'margin-left', 'hang') :
|
|
||||||
scale = self.pw
|
|
||||||
elif attr == 'line-space':
|
|
||||||
scale = self.fontsize * 2.0
|
|
||||||
|
|
||||||
if not ((attr == 'hang') and (int(val) == 0)) :
|
|
||||||
pv = float(val)/scale
|
|
||||||
cssargs[attr] = (self.attr_val_map[attr], pv)
|
|
||||||
keep = True
|
|
||||||
|
|
||||||
start = max(pos1, pos2) + 1
|
|
||||||
|
|
||||||
# disable all of the after class tags until I figure out how to handle them
|
|
||||||
if aftclass != "" : keep = False
|
|
||||||
|
|
||||||
if keep :
|
|
||||||
# make sure line-space does not go below 100% or above 300% since
|
|
||||||
# it can be wacky in some styles
|
|
||||||
if 'line-space' in cssargs:
|
|
||||||
seg = cssargs['line-space'][0]
|
|
||||||
val = cssargs['line-space'][1]
|
|
||||||
if val < 1.0: val = 1.0
|
|
||||||
if val > 3.0: val = 3.0
|
|
||||||
del cssargs['line-space']
|
|
||||||
cssargs['line-space'] = (self.attr_val_map['line-space'], val)
|
|
||||||
|
|
||||||
|
|
||||||
# handle modifications for css style hanging indents
|
|
||||||
if 'hang' in cssargs:
|
|
||||||
hseg = cssargs['hang'][0]
|
|
||||||
hval = cssargs['hang'][1]
|
|
||||||
del cssargs['hang']
|
|
||||||
cssargs['hang'] = (self.attr_val_map['hang'], -hval)
|
|
||||||
mval = 0
|
|
||||||
mseg = 'margin-left: '
|
|
||||||
mval = hval
|
|
||||||
if 'margin-left' in cssargs:
|
|
||||||
mseg = cssargs['margin-left'][0]
|
|
||||||
mval = cssargs['margin-left'][1]
|
|
||||||
if mval < 0: mval = 0
|
|
||||||
mval = hval + mval
|
|
||||||
cssargs['margin-left'] = (mseg, mval)
|
|
||||||
if 'indent' in cssargs:
|
|
||||||
del cssargs['indent']
|
|
||||||
|
|
||||||
cssline = sclass + ' { '
|
|
||||||
for key in iter(cssargs):
|
|
||||||
mseg = cssargs[key][0]
|
|
||||||
mval = cssargs[key][1]
|
|
||||||
if mval == '':
|
|
||||||
cssline += mseg + ' '
|
|
||||||
else :
|
|
||||||
aseg = mseg + '%.1f%%;' % (mval * 100.0)
|
|
||||||
cssline += aseg + ' '
|
|
||||||
|
|
||||||
cssline += '}'
|
|
||||||
|
|
||||||
if sclass != '' :
|
|
||||||
classlst += sclass + '\n'
|
|
||||||
|
|
||||||
# handle special case of paragraph class used inside chapter heading
|
|
||||||
# and non-chapter headings
|
|
||||||
if sclass != '' :
|
|
||||||
ctype = sclass[4:7]
|
|
||||||
if ctype == 'ch1' :
|
|
||||||
csspage += 'h1' + cssline + '\n'
|
|
||||||
if ctype == 'ch2' :
|
|
||||||
csspage += 'h2' + cssline + '\n'
|
|
||||||
if ctype == 'ch3' :
|
|
||||||
csspage += 'h3' + cssline + '\n'
|
|
||||||
if ctype == 'h1-' :
|
|
||||||
csspage += 'h4' + cssline + '\n'
|
|
||||||
if ctype == 'h2-' :
|
|
||||||
csspage += 'h5' + cssline + '\n'
|
|
||||||
if ctype == 'h3_' :
|
|
||||||
csspage += 'h6' + cssline + '\n'
|
|
||||||
|
|
||||||
if cssline != ' { }':
|
|
||||||
csspage += self.stags[tag] + cssline + '\n'
|
|
||||||
|
|
||||||
|
|
||||||
return csspage, classlst
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2CSS(flatxml, fontsize, ph, pw):
|
|
||||||
|
|
||||||
print ' ', 'Using font size:',fontsize
|
|
||||||
print ' ', 'Using page height:', ph
|
|
||||||
print ' ', 'Using page width:', pw
|
|
||||||
|
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, fontsize, ph, pw)
|
dp = PageDimParser(flatxml)
|
||||||
|
(ph, pw) = dp.process()
|
||||||
|
return ph, pw
|
||||||
|
|
||||||
csspage = dp.process()
|
class GParser(object):
|
||||||
|
def __init__(self, flatxml):
|
||||||
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.dpi = 1440
|
||||||
|
self.gh = self.getData('info.glyph.h')
|
||||||
|
self.gw = self.getData('info.glyph.w')
|
||||||
|
self.guse = self.getData('info.glyph.use')
|
||||||
|
if self.guse :
|
||||||
|
self.count = len(self.guse)
|
||||||
|
else :
|
||||||
|
self.count = 0
|
||||||
|
self.gvtx = self.getData('info.glyph.vtx')
|
||||||
|
self.glen = self.getData('info.glyph.len')
|
||||||
|
self.gdpi = self.getData('info.glyph.dpi')
|
||||||
|
self.vx = self.getData('info.vtx.x')
|
||||||
|
self.vy = self.getData('info.vtx.y')
|
||||||
|
self.vlen = self.getData('info.len.n')
|
||||||
|
if self.vlen :
|
||||||
|
self.glen.append(len(self.vlen))
|
||||||
|
elif self.glen:
|
||||||
|
self.glen.append(0)
|
||||||
|
if self.vx :
|
||||||
|
self.gvtx.append(len(self.vx))
|
||||||
|
elif self.gvtx :
|
||||||
|
self.gvtx.append(0)
|
||||||
|
def getData(self, path):
|
||||||
|
result = None
|
||||||
|
cnt = len(self.flatdoc)
|
||||||
|
for j in xrange(cnt):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (name == path):
|
||||||
|
result = argres
|
||||||
|
break
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
return result
|
||||||
|
def getGlyphDim(self, gly):
|
||||||
|
if self.gdpi[gly] == 0:
|
||||||
|
return 0, 0
|
||||||
|
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
||||||
|
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
||||||
|
return maxh, maxw
|
||||||
|
def getPath(self, gly):
|
||||||
|
path = ''
|
||||||
|
if (gly < 0) or (gly >= self.count):
|
||||||
|
return path
|
||||||
|
tx = self.vx[self.gvtx[gly]:self.gvtx[gly+1]]
|
||||||
|
ty = self.vy[self.gvtx[gly]:self.gvtx[gly+1]]
|
||||||
|
p = 0
|
||||||
|
for k in xrange(self.glen[gly], self.glen[gly+1]):
|
||||||
|
if (p == 0):
|
||||||
|
zx = tx[0:self.vlen[k]+1]
|
||||||
|
zy = ty[0:self.vlen[k]+1]
|
||||||
|
else:
|
||||||
|
zx = tx[self.vlen[k-1]+1:self.vlen[k]+1]
|
||||||
|
zy = ty[self.vlen[k-1]+1:self.vlen[k]+1]
|
||||||
|
p += 1
|
||||||
|
j = 0
|
||||||
|
while ( j < len(zx) ):
|
||||||
|
if (j == 0):
|
||||||
|
# Start Position.
|
||||||
|
path += 'M %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly])
|
||||||
|
elif (j <= len(zx)-3):
|
||||||
|
# Cubic Bezier Curve
|
||||||
|
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[j+2] * self.dpi / self.gdpi[gly], zy[j+2] * self.dpi / self.gdpi[gly])
|
||||||
|
j += 2
|
||||||
|
elif (j == len(zx)-2):
|
||||||
|
# Cubic Bezier Curve to Start Position
|
||||||
|
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
|
||||||
|
j += 1
|
||||||
|
elif (j == len(zx)-1):
|
||||||
|
# Quadratic Bezier Curve to Start Position
|
||||||
|
path += 'Q %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
|
||||||
|
|
||||||
return csspage
|
j += 1
|
||||||
|
path += 'z'
|
||||||
|
return path
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# dictionary of all text strings by index value
|
||||||
|
class GlyphDict(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.gdict = {}
|
||||||
|
def lookup(self, id):
|
||||||
|
# id='id="gl%d"' % val
|
||||||
|
if id in self.gdict:
|
||||||
|
return self.gdict[id]
|
||||||
|
return None
|
||||||
|
def addGlyph(self, val, path):
|
||||||
|
id='id="gl%d"' % val
|
||||||
|
self.gdict[id] = path
|
||||||
|
|
||||||
|
|
||||||
|
def generateBook(bookDir, raw, fixedimage):
|
||||||
|
# sanity check Topaz file extraction
|
||||||
|
if not os.path.exists(bookDir) :
|
||||||
|
print "Can not find directory with unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
dictFile = os.path.join(bookDir,'dict0000.dat')
|
||||||
|
if not os.path.exists(dictFile) :
|
||||||
|
print "Can not find dict0000.dat file"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
pageDir = os.path.join(bookDir,'page')
|
||||||
|
if not os.path.exists(pageDir) :
|
||||||
|
print "Can not find page directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
imgDir = os.path.join(bookDir,'img')
|
||||||
|
if not os.path.exists(imgDir) :
|
||||||
|
print "Can not find image directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
glyphsDir = os.path.join(bookDir,'glyphs')
|
||||||
|
if not os.path.exists(glyphsDir) :
|
||||||
|
print "Can not find glyphs directory in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
metaFile = os.path.join(bookDir,'metadata0000.dat')
|
||||||
|
if not os.path.exists(metaFile) :
|
||||||
|
print "Can not find metadata0000.dat in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
svgDir = os.path.join(bookDir,'svg')
|
||||||
|
if not os.path.exists(svgDir) :
|
||||||
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
|
if not os.path.exists(xmlDir) :
|
||||||
|
os.makedirs(xmlDir)
|
||||||
|
|
||||||
|
otherFile = os.path.join(bookDir,'other0000.dat')
|
||||||
|
if not os.path.exists(otherFile) :
|
||||||
|
print "Can not find other0000.dat in unencrypted book"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
print "Updating to color images if available"
|
||||||
|
spath = os.path.join(bookDir,'color_img')
|
||||||
|
dpath = os.path.join(bookDir,'img')
|
||||||
|
filenames = os.listdir(spath)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
for filename in filenames:
|
||||||
|
imgname = filename.replace('color','img')
|
||||||
|
sfile = os.path.join(spath,filename)
|
||||||
|
dfile = os.path.join(dpath,imgname)
|
||||||
|
imgdata = file(sfile,'rb').read()
|
||||||
|
file(dfile,'wb').write(imgdata)
|
||||||
|
|
||||||
|
print "Creating cover.jpg"
|
||||||
|
isCover = False
|
||||||
|
cpath = os.path.join(bookDir,'img')
|
||||||
|
cpath = os.path.join(cpath,'img0000.jpg')
|
||||||
|
if os.path.isfile(cpath):
|
||||||
|
cover = file(cpath, 'rb').read()
|
||||||
|
cpath = os.path.join(bookDir,'cover.jpg')
|
||||||
|
file(cpath, 'wb').write(cover)
|
||||||
|
isCover = True
|
||||||
|
|
||||||
|
|
||||||
|
print 'Processing Dictionary'
|
||||||
|
dict = Dictionary(dictFile)
|
||||||
|
|
||||||
|
print 'Processing Meta Data and creating OPF'
|
||||||
|
meta_array = getMetaArray(metaFile)
|
||||||
|
|
||||||
|
# replace special chars in title and authors like & < >
|
||||||
|
title = meta_array.get('Title','No Title Provided')
|
||||||
|
title = title.replace('&','&')
|
||||||
|
title = title.replace('<','<')
|
||||||
|
title = title.replace('>','>')
|
||||||
|
meta_array['Title'] = title
|
||||||
|
authors = meta_array.get('Authors','No Authors Provided')
|
||||||
|
authors = authors.replace('&','&')
|
||||||
|
authors = authors.replace('<','<')
|
||||||
|
authors = authors.replace('>','>')
|
||||||
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
|
mlst = []
|
||||||
|
for key in meta_array:
|
||||||
|
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
|
||||||
|
metastr = "".join(mlst)
|
||||||
|
mlst = None
|
||||||
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
|
print 'Processing StyleSheet'
|
||||||
|
# get some scaling info from metadata to use while processing styles
|
||||||
|
fontsize = '135'
|
||||||
|
if 'fontSize' in meta_array:
|
||||||
|
fontsize = meta_array['fontSize']
|
||||||
|
|
||||||
|
# also get the size of a normal text page
|
||||||
|
spage = '1'
|
||||||
|
if 'firstTextPage' in meta_array:
|
||||||
|
spage = meta_array['firstTextPage']
|
||||||
|
pnum = int(spage)
|
||||||
|
|
||||||
|
# get page height and width from first text page for use in stylesheet scaling
|
||||||
|
pname = 'page%04d.dat' % (pnum + 1)
|
||||||
|
fname = os.path.join(pageDir,pname)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
|
file(xname, 'wb').write(cssstr)
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
|
print 'Processing Glyphs'
|
||||||
|
gd = GlyphDict()
|
||||||
|
filenames = os.listdir(glyphsDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
glyfname = os.path.join(svgDir,'glyphs.svg')
|
||||||
|
glyfile = open(glyfname, 'w')
|
||||||
|
glyfile.write('<?xml version="1.0" standalone="no"?>\n')
|
||||||
|
glyfile.write('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
|
glyfile.write('<svg width="512" height="512" viewBox="0 0 511 511" xmlns="http://www.w3.org/2000/svg" version="1.1">\n')
|
||||||
|
glyfile.write('<title>Glyphs for %s</title>\n' % meta_array['Title'])
|
||||||
|
glyfile.write('<defs>\n')
|
||||||
|
counter = 0
|
||||||
|
for filename in filenames:
|
||||||
|
# print ' ', filename
|
||||||
|
print '.',
|
||||||
|
fname = os.path.join(glyphsDir,filename)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
|
gp = GParser(flat_xml)
|
||||||
|
for i in xrange(0, gp.count):
|
||||||
|
path = gp.getPath(i)
|
||||||
|
maxh, maxw = gp.getGlyphDim(i)
|
||||||
|
fullpath = '<path id="gl%d" d="%s" fill="black" /><!-- width=%d height=%d -->\n' % (counter * 256 + i, path, maxw, maxh)
|
||||||
|
glyfile.write(fullpath)
|
||||||
|
gd.addGlyph(counter * 256 + i, fullpath)
|
||||||
|
counter += 1
|
||||||
|
glyfile.write('</defs>\n')
|
||||||
|
glyfile.write('</svg>\n')
|
||||||
|
glyfile.close()
|
||||||
|
print " "
|
||||||
|
|
||||||
|
|
||||||
|
# start up the html
|
||||||
|
# also build up tocentries while processing html
|
||||||
|
htmlFileName = "book.html"
|
||||||
|
hlst = []
|
||||||
|
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
|
||||||
|
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
|
||||||
|
hlst.append('<head>\n')
|
||||||
|
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
|
||||||
|
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
|
||||||
|
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
|
||||||
|
hlst.append('</head>\n<body>\n')
|
||||||
|
|
||||||
|
print 'Processing Pages'
|
||||||
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
|
# readability when rendering to the screen.
|
||||||
|
scaledpi = 1440.0
|
||||||
|
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
|
xmllst = []
|
||||||
|
elst = []
|
||||||
|
|
||||||
|
for filename in filenames:
|
||||||
|
# print ' ', filename
|
||||||
|
print ".",
|
||||||
|
fname = os.path.join(pageDir,filename)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
|
# first get the html
|
||||||
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
elst.append(tocinfo)
|
||||||
|
hlst.append(pagehtml)
|
||||||
|
|
||||||
|
# finish up the html string and output it
|
||||||
|
hlst.append('</body>\n</html>\n')
|
||||||
|
htmlstr = "".join(hlst)
|
||||||
|
hlst = None
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tlst = []
|
||||||
|
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
tlst.append('<head>\n')
|
||||||
|
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
tlst.append('</head>\n')
|
||||||
|
tlst.append('<body>\n')
|
||||||
|
|
||||||
|
tlst.append('<h2>Table of Contents</h2>\n')
|
||||||
|
start = pageidnums[0]
|
||||||
|
if (raw):
|
||||||
|
startname = 'page%04d.svg' % start
|
||||||
|
else:
|
||||||
|
startname = 'page%04d.xhtml' % start
|
||||||
|
|
||||||
|
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
tocentries = "".join(elst)
|
||||||
|
elst = None
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
|
||||||
|
tlst.append('</body>\n')
|
||||||
|
tlst.append('</html>\n')
|
||||||
|
tochtml = "".join(tlst)
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
slst = []
|
||||||
|
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
slst.append('<head>\n')
|
||||||
|
slst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
slst.append('</head>\n')
|
||||||
|
slst.append('<body>\n')
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
slst.append('<h2>List of Pages</h2>\n')
|
||||||
|
slst.append('<div>\n')
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flst = []
|
||||||
|
for page in pagelst:
|
||||||
|
flst.append(xmllst[page])
|
||||||
|
flat_svg = "".join(flst)
|
||||||
|
flst=None
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
|
if (raw) :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
|
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
else :
|
||||||
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
|
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
previd = pageid
|
||||||
|
pfile.write(svgxml)
|
||||||
|
pfile.close()
|
||||||
|
counter += 1
|
||||||
|
slst.append('</div>\n')
|
||||||
|
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
|
||||||
|
slst.append('</body>\n</html>\n')
|
||||||
|
svgindex = "".join(slst)
|
||||||
|
slst = None
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
|
||||||
|
# build the opf file
|
||||||
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
|
olst = []
|
||||||
|
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
|
||||||
|
# adding metadata
|
||||||
|
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
|
||||||
|
if 'oASIN' in meta_array:
|
||||||
|
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
|
||||||
|
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
|
||||||
|
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
|
||||||
|
olst.append(' <dc:language>en</dc:language>\n')
|
||||||
|
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
|
||||||
|
if isCover:
|
||||||
|
olst.append(' <meta name="cover" content="bookcover"/>\n')
|
||||||
|
olst.append(' </metadata>\n')
|
||||||
|
olst.append('<manifest>\n')
|
||||||
|
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
|
||||||
|
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
|
||||||
|
# adding image files to manifest
|
||||||
|
filenames = os.listdir(imgDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
for filename in filenames:
|
||||||
|
imgname, imgext = os.path.splitext(filename)
|
||||||
|
if imgext == '.jpg':
|
||||||
|
imgext = 'jpeg'
|
||||||
|
if imgext == '.svg':
|
||||||
|
imgext = 'svg+xml'
|
||||||
|
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
|
||||||
|
if isCover:
|
||||||
|
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
|
||||||
|
olst.append('</manifest>\n')
|
||||||
|
# adding spine
|
||||||
|
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
|
||||||
|
if isCover:
|
||||||
|
olst.append(' <guide>\n')
|
||||||
|
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
|
||||||
|
olst.append(' </guide>\n')
|
||||||
|
olst.append('</package>\n')
|
||||||
|
opfstr = "".join(olst)
|
||||||
|
olst = None
|
||||||
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
|
print 'Processing Complete'
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print "genbook.py generates a book from the extract Topaz Files"
|
||||||
|
print "Usage:"
|
||||||
|
print " genbook.py [-r] [-h [--fixed-image] <bookDir> "
|
||||||
|
print " "
|
||||||
|
print "Options:"
|
||||||
|
print " -h : help - print this usage message"
|
||||||
|
print " -r : generate raw svg files (not wrapped in xhtml)"
|
||||||
|
print " --fixed-image : genearate any Fixed Area as an svg image in the html"
|
||||||
|
print " "
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv):
|
||||||
|
bookDir = ''
|
||||||
|
if len(argv) == 0:
|
||||||
|
argv = sys.argv
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(argv[1:], "rh:",["fixed-image"])
|
||||||
|
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
raw = 0
|
||||||
|
fixedimage = True
|
||||||
|
for o, a in opts:
|
||||||
|
if o =="-h":
|
||||||
|
usage()
|
||||||
|
return 0
|
||||||
|
if o =="-r":
|
||||||
|
raw = 1
|
||||||
|
if o =="--fixed-image":
|
||||||
|
fixedimage = True
|
||||||
|
|
||||||
|
bookDir = args[0]
|
||||||
|
|
||||||
|
rv = generateBook(bookDir, raw, fixedimage)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main(''))
|
||||||
|
|||||||
@@ -1,5 +1,24 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
||||||
|
# for personal use for archiving and converting your ebooks
|
||||||
|
|
||||||
|
# PLEASE DO NOT PIRATE EBOOKS!
|
||||||
|
|
||||||
|
# We want all authors and publishers, and eBook stores to live
|
||||||
|
# long and prosperous lives but at the same time we just want to
|
||||||
|
# be able to read OUR books on whatever device we want and to keep
|
||||||
|
# readable for a long, long time
|
||||||
|
|
||||||
|
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
||||||
|
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
||||||
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '4.0'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
self.stream = stream
|
self.stream = stream
|
||||||
@@ -10,427 +29,184 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import zlib, zipfile, tempfile, shutil
|
import string
|
||||||
from struct import pack
|
import re
|
||||||
from struct import unpack
|
import traceback
|
||||||
|
|
||||||
class TpzDRMError(Exception):
|
buildXML = False
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# local support routines
|
if 'calibre' in sys.modules:
|
||||||
import kgenpids
|
inCalibre = True
|
||||||
import genbook
|
else:
|
||||||
#
|
inCalibre = False
|
||||||
# Utility routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# Get a 7 bit encoded number from file
|
if inCalibre:
|
||||||
def bookReadEncodedNumber(fo):
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
flag = False
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
data = ord(fo.read(1))
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
if data == 0xFF:
|
else:
|
||||||
flag = True
|
import mobidedrm
|
||||||
data = ord(fo.read(1))
|
import topazextract
|
||||||
if data >= 0x80:
|
import kgenpids
|
||||||
datax = (data & 0x7F)
|
|
||||||
while data >= 0x80 :
|
|
||||||
data = ord(fo.read(1))
|
|
||||||
datax = (datax <<7) + (data & 0x7F)
|
|
||||||
data = datax
|
|
||||||
if flag:
|
|
||||||
data = -data
|
|
||||||
return data
|
|
||||||
|
|
||||||
# Get a length prefixed string from file
|
|
||||||
def bookReadString(fo):
|
|
||||||
stringLength = bookReadEncodedNumber(fo)
|
|
||||||
return unpack(str(stringLength)+"s",fo.read(stringLength))[0]
|
|
||||||
|
|
||||||
#
|
|
||||||
# crypto routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
|
||||||
def topazCryptoInit(key):
|
|
||||||
ctx1 = 0x0CAFFE19E
|
|
||||||
for keyChar in key:
|
|
||||||
keyByte = ord(keyChar)
|
|
||||||
ctx2 = ctx1
|
|
||||||
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
|
||||||
return [ctx1,ctx2]
|
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
|
||||||
def topazCryptoDecrypt(data, ctx):
|
|
||||||
ctx1 = ctx[0]
|
|
||||||
ctx2 = ctx[1]
|
|
||||||
plainText = ""
|
|
||||||
for dataChar in data:
|
|
||||||
dataByte = ord(dataChar)
|
|
||||||
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
|
||||||
ctx2 = ctx1
|
|
||||||
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
|
||||||
plainText += chr(m)
|
|
||||||
return plainText
|
|
||||||
|
|
||||||
# Decrypt data with the PID
|
|
||||||
def decryptRecord(data,PID):
|
|
||||||
ctx = topazCryptoInit(PID)
|
|
||||||
return topazCryptoDecrypt(data, ctx)
|
|
||||||
|
|
||||||
# Try to decrypt a dkey record (contains the bookPID)
|
|
||||||
def decryptDkeyRecord(data,PID):
|
|
||||||
record = decryptRecord(data,PID)
|
|
||||||
fields = unpack("3sB8sB8s3s",record)
|
|
||||||
if fields[0] != "PID" or fields[5] != "pid" :
|
|
||||||
raise TpzDRMError("Didn't find PID magic numbers in record")
|
|
||||||
elif fields[1] != 8 or fields[3] != 8 :
|
|
||||||
raise TpzDRMError("Record didn't contain correct length fields")
|
|
||||||
elif fields[2] != PID :
|
|
||||||
raise TpzDRMError("Record didn't contain PID")
|
|
||||||
return fields[4]
|
|
||||||
|
|
||||||
# Decrypt all dkey records (contain the book PID)
|
|
||||||
def decryptDkeyRecords(data,PID):
|
|
||||||
nbKeyRecords = ord(data[0])
|
|
||||||
records = []
|
|
||||||
data = data[1:]
|
|
||||||
for i in range (0,nbKeyRecords):
|
|
||||||
length = ord(data[0])
|
|
||||||
try:
|
|
||||||
key = decryptDkeyRecord(data[1:length+1],PID)
|
|
||||||
records.append(key)
|
|
||||||
except TpzDRMError:
|
|
||||||
pass
|
|
||||||
data = data[1+length:]
|
|
||||||
if len(records) == 0:
|
|
||||||
raise TpzDRMError("BookKey Not Found")
|
|
||||||
return records
|
|
||||||
|
|
||||||
|
|
||||||
class TopazBook:
|
# cleanup bytestring filenames
|
||||||
def __init__(self, filename, outdir):
|
# borrowed from calibre from calibre/src/calibre/__init__.py
|
||||||
self.fo = file(filename, 'rb')
|
# added in removal of non-printing chars
|
||||||
self.outdir = outdir
|
# and removal of . at start
|
||||||
self.bookPayloadOffset = 0
|
# convert spaces to underscores
|
||||||
self.bookHeaderRecords = {}
|
def cleanup_name(name):
|
||||||
self.bookMetadata = {}
|
_filename_sanitize = re.compile(r'[\xae\0\\|\?\*<":>\+/]')
|
||||||
self.bookKey = None
|
substitute='_'
|
||||||
magic = unpack("4s",self.fo.read(4))[0]
|
one = ''.join(char for char in name if char in string.printable)
|
||||||
if magic != 'TPZ0':
|
one = _filename_sanitize.sub(substitute, one)
|
||||||
raise TpzDRMError("Parse Error : Invalid Header, not a Topaz file")
|
one = re.sub(r'\s', ' ', one).strip()
|
||||||
self.parseTopazHeaders()
|
one = re.sub(r'^\.+$', '_', one)
|
||||||
self.parseMetadata()
|
one = one.replace('..', substitute)
|
||||||
|
# Windows doesn't like path components that end with a period
|
||||||
|
if one.endswith('.'):
|
||||||
|
one = one[:-1]+substitute
|
||||||
|
# Mac and Unix don't like file names that begin with a full stop
|
||||||
|
if len(one) > 0 and one[0] == '.':
|
||||||
|
one = substitute+one[1:]
|
||||||
|
one = one.replace(' ','_')
|
||||||
|
return one
|
||||||
|
|
||||||
def parseTopazHeaders(self):
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
def bookReadHeaderRecordData():
|
global buildXML
|
||||||
# Read and return the data of one header record at the current book file position
|
|
||||||
# [[offset,decompressedLength,compressedLength],...]
|
|
||||||
nbValues = bookReadEncodedNumber(self.fo)
|
|
||||||
values = []
|
|
||||||
for i in range (0,nbValues):
|
|
||||||
values.append([bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo)])
|
|
||||||
return values
|
|
||||||
def parseTopazHeaderRecord():
|
|
||||||
# Read and parse one header record at the current book file position and return the associated data
|
|
||||||
# [[offset,decompressedLength,compressedLength],...]
|
|
||||||
if ord(self.fo.read(1)) != 0x63:
|
|
||||||
raise TpzDRMError("Parse Error : Invalid Header")
|
|
||||||
tag = bookReadString(self.fo)
|
|
||||||
record = bookReadHeaderRecordData()
|
|
||||||
return [tag,record]
|
|
||||||
nbRecords = bookReadEncodedNumber(self.fo)
|
|
||||||
for i in range (0,nbRecords):
|
|
||||||
result = parseTopazHeaderRecord()
|
|
||||||
# print result[0], result[1]
|
|
||||||
self.bookHeaderRecords[result[0]] = result[1]
|
|
||||||
if ord(self.fo.read(1)) != 0x64 :
|
|
||||||
raise TpzDRMError("Parse Error : Invalid Header")
|
|
||||||
self.bookPayloadOffset = self.fo.tell()
|
|
||||||
|
|
||||||
def parseMetadata(self):
|
# handle the obvious cases at the beginning
|
||||||
# Parse the metadata record from the book payload and return a list of [key,values]
|
if not os.path.isfile(infile):
|
||||||
self.fo.seek(self.bookPayloadOffset + self.bookHeaderRecords["metadata"][0][0])
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
tag = bookReadString(self.fo)
|
return 1
|
||||||
if tag != "metadata" :
|
|
||||||
raise TpzDRMError("Parse Error : Record Names Don't Match")
|
|
||||||
flags = ord(self.fo.read(1))
|
|
||||||
nbRecords = ord(self.fo.read(1))
|
|
||||||
for i in range (0,nbRecords) :
|
|
||||||
record = [bookReadString(self.fo), bookReadString(self.fo)]
|
|
||||||
self.bookMetadata[record[0]] = record[1]
|
|
||||||
return self.bookMetadata
|
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
mobi = True
|
||||||
keysRecord = None
|
magic3 = file(infile,'rb').read(3)
|
||||||
keysRecordRecord = None
|
if magic3 == 'TPZ':
|
||||||
if 'keys' in self.bookMetadata:
|
mobi = False
|
||||||
keysRecord = self.bookMetadata['keys']
|
|
||||||
if keysRecord in self.bookMetadata:
|
|
||||||
keysRecordRecord = self.bookMetadata[keysRecord]
|
|
||||||
return keysRecord, keysRecordRecord
|
|
||||||
|
|
||||||
def getBookTitle(self):
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
title = ''
|
|
||||||
if 'Title' in self.bookMetadata:
|
|
||||||
title = self.bookMetadata['Title']
|
|
||||||
return title
|
|
||||||
|
|
||||||
def setBookKey(self, key):
|
if mobi:
|
||||||
self.bookKey = key
|
mb = mobidedrm.MobiBook(infile)
|
||||||
|
else:
|
||||||
|
mb = topazextract.TopazBook(infile)
|
||||||
|
|
||||||
def getBookPayloadRecord(self, name, index):
|
title = mb.getBookTitle()
|
||||||
# Get a record in the book payload, given its name and index.
|
print "Processing Book: ", title
|
||||||
# decrypted and decompressed if necessary
|
filenametitle = cleanup_name(title)
|
||||||
encrypted = False
|
outfilename = bookname
|
||||||
compressed = False
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
try:
|
outfilename = outfilename + "_" + filenametitle
|
||||||
recordOffset = self.bookHeaderRecords[name][index][0]
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
except:
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
raise TpzDRMError("Parse Error : Invalid Record, record not found")
|
|
||||||
|
|
||||||
self.fo.seek(self.bookPayloadOffset + recordOffset)
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
tag = bookReadString(self.fo)
|
# build pid list
|
||||||
if tag != name :
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
raise TpzDRMError("Parse Error : Invalid Record, record name doesn't match")
|
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
recordIndex = bookReadEncodedNumber(self.fo)
|
try:
|
||||||
if recordIndex < 0 :
|
mb.processBook(pidlst)
|
||||||
encrypted = True
|
|
||||||
recordIndex = -recordIndex -1
|
|
||||||
|
|
||||||
if recordIndex != index :
|
except mobidedrm.DrmException, e:
|
||||||
raise TpzDRMError("Parse Error : Invalid Record, index doesn't match")
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except topazextract.TpzDRMError, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except Exception, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
|
||||||
if (self.bookHeaderRecords[name][index][2] > 0):
|
if mobi:
|
||||||
compressed = True
|
if mb.getPrintReplica():
|
||||||
record = self.fo.read(self.bookHeaderRecords[name][index][2])
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
else:
|
else:
|
||||||
record = self.fo.read(self.bookHeaderRecords[name][index][1])
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
|
mb.getMobiFile(outfile)
|
||||||
|
return 0
|
||||||
|
|
||||||
if encrypted:
|
# topaz:
|
||||||
if self.bookKey:
|
print " Creating NoDRM HTMLZ Archive"
|
||||||
ctx = topazCryptoInit(self.bookKey)
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
record = topazCryptoDecrypt(record,ctx)
|
mb.getHTMLZip(zipname)
|
||||||
else :
|
|
||||||
raise TpzDRMError("Error: Attempt to decrypt without bookKey")
|
|
||||||
|
|
||||||
if compressed:
|
print " Creating SVG ZIP Archive"
|
||||||
record = zlib.decompress(record)
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
return record
|
if buildXML:
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
|
mb.getXMLZip(zipname)
|
||||||
|
|
||||||
def processBook(self, pidlst):
|
# remove internal temporary directory of Topaz pieces
|
||||||
raw = 0
|
mb.cleanup()
|
||||||
fixedimage=True
|
|
||||||
try:
|
|
||||||
keydata = self.getBookPayloadRecord('dkey', 0)
|
|
||||||
except TpzDRMError, e:
|
|
||||||
print "no dkey record found, book may not be encrypted"
|
|
||||||
print "attempting to extrct files without a book key"
|
|
||||||
self.createBookDirectory()
|
|
||||||
self.extractFiles()
|
|
||||||
print "Successfully Extracted Topaz contents"
|
|
||||||
rv = genbook.generateBook(self.outdir, raw, fixedimage)
|
|
||||||
if rv == 0:
|
|
||||||
print "\nBook Successfully generated"
|
|
||||||
return rv
|
|
||||||
|
|
||||||
# try each pid to decode the file
|
return 0
|
||||||
bookKey = None
|
|
||||||
for pid in pidlst:
|
|
||||||
# use 8 digit pids here
|
|
||||||
pid = pid[0:8]
|
|
||||||
print "\nTrying: ", pid
|
|
||||||
bookKeys = []
|
|
||||||
data = keydata
|
|
||||||
try:
|
|
||||||
bookKeys+=decryptDkeyRecords(data,pid)
|
|
||||||
except TpzDRMError, e:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
bookKey = bookKeys[0]
|
|
||||||
print "Book Key Found!"
|
|
||||||
break
|
|
||||||
|
|
||||||
if not bookKey:
|
|
||||||
raise TpzDRMError('Decryption Unsucessful; No valid pid found')
|
|
||||||
|
|
||||||
self.setBookKey(bookKey)
|
|
||||||
self.createBookDirectory()
|
|
||||||
self.extractFiles()
|
|
||||||
print "Successfully Extracted Topaz contents"
|
|
||||||
rv = genbook.generateBook(self.outdir, raw, fixedimage)
|
|
||||||
if rv == 0:
|
|
||||||
print "\nBook Successfully generated"
|
|
||||||
return rv
|
|
||||||
|
|
||||||
def createBookDirectory(self):
|
|
||||||
outdir = self.outdir
|
|
||||||
# create output directory structure
|
|
||||||
if not os.path.exists(outdir):
|
|
||||||
os.makedirs(outdir)
|
|
||||||
destdir = os.path.join(outdir,'img')
|
|
||||||
if not os.path.exists(destdir):
|
|
||||||
os.makedirs(destdir)
|
|
||||||
destdir = os.path.join(outdir,'color_img')
|
|
||||||
if not os.path.exists(destdir):
|
|
||||||
os.makedirs(destdir)
|
|
||||||
destdir = os.path.join(outdir,'page')
|
|
||||||
if not os.path.exists(destdir):
|
|
||||||
os.makedirs(destdir)
|
|
||||||
destdir = os.path.join(outdir,'glyphs')
|
|
||||||
if not os.path.exists(destdir):
|
|
||||||
os.makedirs(destdir)
|
|
||||||
|
|
||||||
def extractFiles(self):
|
|
||||||
outdir = self.outdir
|
|
||||||
for headerRecord in self.bookHeaderRecords:
|
|
||||||
name = headerRecord
|
|
||||||
if name != "dkey" :
|
|
||||||
ext = '.dat'
|
|
||||||
if name == 'img' : ext = '.jpg'
|
|
||||||
if name == 'color' : ext = '.jpg'
|
|
||||||
print "\nProcessing Section: %s " % name
|
|
||||||
for index in range (0,len(self.bookHeaderRecords[name])) :
|
|
||||||
fnum = "%04d" % index
|
|
||||||
fname = name + fnum + ext
|
|
||||||
destdir = outdir
|
|
||||||
if name == 'img':
|
|
||||||
destdir = os.path.join(outdir,'img')
|
|
||||||
if name == 'color':
|
|
||||||
destdir = os.path.join(outdir,'color_img')
|
|
||||||
if name == 'page':
|
|
||||||
destdir = os.path.join(outdir,'page')
|
|
||||||
if name == 'glyphs':
|
|
||||||
destdir = os.path.join(outdir,'glyphs')
|
|
||||||
outputFile = os.path.join(destdir,fname)
|
|
||||||
print ".",
|
|
||||||
record = self.getBookPayloadRecord(name,index)
|
|
||||||
if record != '':
|
|
||||||
file(outputFile, 'wb').write(record)
|
|
||||||
print " "
|
|
||||||
|
|
||||||
|
|
||||||
def zipUpDir(myzip, tempdir,localname):
|
|
||||||
currentdir = tempdir
|
|
||||||
if localname != "":
|
|
||||||
currentdir = os.path.join(currentdir,localname)
|
|
||||||
list = os.listdir(currentdir)
|
|
||||||
for file in list:
|
|
||||||
afilename = file
|
|
||||||
localfilePath = os.path.join(localname, afilename)
|
|
||||||
realfilePath = os.path.join(currentdir,file)
|
|
||||||
if os.path.isfile(realfilePath):
|
|
||||||
myzip.write(realfilePath, localfilePath)
|
|
||||||
elif os.path.isdir(realfilePath):
|
|
||||||
zipUpDir(myzip, tempdir, localfilePath)
|
|
||||||
|
|
||||||
|
|
||||||
def usage(progname):
|
def usage(progname):
|
||||||
print "Removes DRM protection from Topaz ebooks and extract the contents"
|
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
||||||
|
|
||||||
|
#
|
||||||
# Main
|
# Main
|
||||||
|
#
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
k4 = False
|
k4 = False
|
||||||
pids = []
|
|
||||||
serials = []
|
|
||||||
kInfoFiles = []
|
kInfoFiles = []
|
||||||
|
serials = []
|
||||||
|
pids = []
|
||||||
|
|
||||||
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
print str(err)
|
print str(err)
|
||||||
usage(progname)
|
usage(progname)
|
||||||
return 1
|
sys.exit(2)
|
||||||
if len(args)<2:
|
if len(args)<2:
|
||||||
usage(progname)
|
usage(progname)
|
||||||
return 1
|
sys.exit(2)
|
||||||
|
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o == "-k":
|
if o == "-k":
|
||||||
if a == None :
|
if a == None :
|
||||||
print "Invalid parameter for -k"
|
raise DrmException("Invalid parameter for -k")
|
||||||
return 1
|
|
||||||
kInfoFiles.append(a)
|
kInfoFiles.append(a)
|
||||||
if o == "-p":
|
if o == "-p":
|
||||||
if a == None :
|
if a == None :
|
||||||
print "Invalid parameter for -p"
|
raise DrmException("Invalid parameter for -p")
|
||||||
return 1
|
|
||||||
pids = a.split(',')
|
pids = a.split(',')
|
||||||
if o == "-s":
|
if o == "-s":
|
||||||
if a == None :
|
if a == None :
|
||||||
print "Invalid parameter for -s"
|
raise DrmException("Invalid parameter for -s")
|
||||||
return 1
|
|
||||||
serials = a.split(',')
|
serials = a.split(',')
|
||||||
k4 = True
|
|
||||||
|
|
||||||
|
# try with built in Kindle Info files
|
||||||
|
k4 = True
|
||||||
|
if sys.platform.startswith('linux'):
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = None
|
||||||
infile = args[0]
|
infile = args[0]
|
||||||
outdir = args[1]
|
outdir = args[1]
|
||||||
|
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
|
||||||
if not os.path.isfile(infile):
|
|
||||||
print "Input File Does Not Exist"
|
|
||||||
return 1
|
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
|
||||||
tempdir = tempfile.mkdtemp()
|
|
||||||
|
|
||||||
tb = TopazBook(infile, tempdir)
|
|
||||||
title = tb.getBookTitle()
|
|
||||||
print "Processing Book: ", title
|
|
||||||
keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
|
|
||||||
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
|
|
||||||
|
|
||||||
try:
|
|
||||||
tb.processBook(pidlst)
|
|
||||||
except TpzDRMError, e:
|
|
||||||
print str(e)
|
|
||||||
print " Creating DeBug Full Zip Archive of Book"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_debug' + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
zipUpDir(myzip, tempdir, '')
|
|
||||||
myzip.close()
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 1
|
|
||||||
|
|
||||||
print " Creating HTML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_nodrm' + '.zip')
|
|
||||||
myzip1 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.html'),'book.html')
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip1.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip1.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip1, tempdir, 'img')
|
|
||||||
myzip1.close()
|
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
|
||||||
myzip2 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip2.write(os.path.join(tempdir,'index_svg.xhtml'),'index_svg.xhtml')
|
|
||||||
zipUpDir(myzip2, tempdir, 'svg')
|
|
||||||
zipUpDir(myzip2, tempdir, 'img')
|
|
||||||
myzip2.close()
|
|
||||||
|
|
||||||
print " Creating XML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
|
||||||
myzip3 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
targetdir = os.path.join(tempdir,'xml')
|
|
||||||
zipUpDir(myzip3, targetdir, '')
|
|
||||||
zipUpDir(myzip3, tempdir, 'img')
|
|
||||||
myzip3.close()
|
|
||||||
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected wi
|
|||||||
|
|
||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (ineptpdf_vXX_plugin.zip) and click the 'Add' button. you're done.
|
|
||||||
|
|
||||||
This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python, PyCrypto and/or OpenSSL already installed, but they aren't necessary.
|
This plugin is meant to decrypt Adobe Digital Edition PDFs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python, PyCrypto and/or OpenSSL already installed, but they aren't necessary.
|
||||||
|
|
||||||
|
|||||||
@@ -5,12 +5,11 @@ This plugin supersedes MobiDeDRM, K4DeDRM, and K4PCDeDRM and K4X plugins. If yo
|
|||||||
|
|
||||||
|
|
||||||
This plugin is meant to remove the DRM from .prc, .azw, .azw1, and .tpz ebooks. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.
|
This plugin is meant to remove the DRM from .prc, .azw, .azw1, and .tpz ebooks. Calibre can then convert them to whatever format you desire. It is meant to function without having to install any dependencies except for Calibre being on your same machine and in the same account as your "Kindle for PC" or "Kindle for Mac" application if you are going to remove the DRM from those types of books.
|
||||||
Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (K4MobiDeDRM_vXX_plugin.zip) and click the 'Add' button. You're done.
|
|
||||||
|
|
||||||
Please note: Calibre does not provide any immediate feedback to indicate that adding the plugin was a success. You can always click on the File-Type plugins to see if the plugin was added.
|
|
||||||
|
|
||||||
Installation:
|
Installation:
|
||||||
Highlight the plugin (K4MobiDeDRM under the "File type plugins" category) and click the "Customize Plugin" button on Calibre's Preferences->Plugins page. Enter a comma separated list of your 10 digit PIDs. Include in this list (again separated by commas) any 16 digit serial numbers the standalone Kindles you may have (these typically begin "B0...") This is not needed if you only want to decode "Kindle for PC" or "Kindle for Mac" books.
|
|
||||||
Go to Calibre's Preferences page. Do **NOT** select "Get Plugins to enhance calibre" as this is reserved for official calibre plugins", instead select "Change calibre behavior". Under "Advanced" click on the on the Plugins button. Click on the "Load plugin from file" button at the bottom of the screen. Use the file dialog button to select the plugin's zip file (K4MobiDeDRM_vXX_plugin.zip) and click the "Add" (or it may say "Open" button. Then click on the "Yes" button in the warning dialog that appears. A Confirmation dialog appears that says the plugin has been installed.
|
Go to Calibre's Preferences page. Do **NOT** select "Get Plugins to enhance calibre" as this is reserved for official calibre plugins", instead select "Change calibre behavior". Under "Advanced" click on the on the Plugins button. Click on the "Load plugin from file" button at the bottom of the screen. Use the file dialog button to select the plugin's zip file (K4MobiDeDRM_vXX_plugin.zip) and click the "Add" (or it may say "Open" button. Then click on the "Yes" button in the warning dialog that appears. A Confirmation dialog appears that says the plugin has been installed.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ All credit given to The Dark Reverser for the original standalone script. I had
|
|||||||
All credit given to The Dark Reverser for the original standalone script. I had the much easier job of converting it to a Calibre plugin.
|
All credit given to The Dark Reverser for the original standalone script. I had the much easier job of converting it to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (eReaderPDB2PML_vXX_plugin.zip) and click the 'Add' button. You're done.
|
|
||||||
This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files. Calibre can then convert it to whatever format you desire. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. I've included the psyco libraries (compiled for each platform) for speed. If your system can use them, great! Otherwise, they won't be used and things will just work slower.
|
This plugin is meant to convert secure Ereader files (PDB) to unsecured PMLZ files. Calibre can then convert it to whatever format you desire. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. I've included the psyco libraries (compiled for each platform) for speed. If your system can use them, great! Otherwise, they won't be used and things will just work slower.
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ with Adobe's Adept encryption. It is meant to function without having to install
|
|||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
|
|
||||||
Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (ignobleepub_vXX_plugin.zip) and
|
|
||||||
This plugin is meant to decrypt Barnes & Noble Epubs that are protected
|
This plugin is meant to decrypt Barnes & Noble Epubs that are protected
|
||||||
|
|
||||||
with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected w
|
|||||||
|
|
||||||
I had the much easier job of converting them to a Calibre plugin.
|
I had the much easier job of converting them to a Calibre plugin.
|
||||||
|
|
||||||
Go to Calibre's Preferences page... click on the Plugins button. Use the file dialog button to select the plugin's zip file (ineptepub_vXX_plugin.zip) and click the 'Add' button. you're done.
|
|
||||||
|
|
||||||
This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
This plugin is meant to decrypt Adobe Digital Edition Epubs that are protected with Adobe's Adept encryption. It is meant to function without having to install any dependencies... other than having Calibre installed, of course. It will still work if you have Python and PyCrypto already installed, but they aren't necessary.
|
||||||
|
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
Installing openssl on Windows 64-bit (Windows 2000 and higher)
|
|
||||||
|
|
||||||
Win64 OpenSSL v0.9.8o (8Mb)
|
|
||||||
http://www.slproweb.com/download/Win64OpenSSL-0_9_8o.exe
|
|
||||||
(if you get an error message about missing Visual C++ redistributables... cancel the install and install the below support program from Microsoft, THEN install OpenSSL)
|
|
||||||
|
|
||||||
Visual C++ 2008 Redistributables (x64) (1.7Mb)
|
|
||||||
http://www.microsoft.com/downloads/details.aspx?familyid=bd2a6171-e2d6-4230-b809-9a8d7548c1b6
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Installing openssl on Windows 32-bit (Windows 2000 and higher)
|
|
||||||
|
|
||||||
Win32 OpenSSL v0.9.8o (8Mb)
|
|
||||||
http://www.slproweb.com/download/Win32OpenSSL-0_9_8o.exe
|
|
||||||
(if you get an error message about missing Visual C++ redistributables... cancel the install and install the below support program from Microsoft, THEN install OpenSSL)
|
|
||||||
|
|
||||||
Visual C++ 2008 Redistributables (1.7Mb)
|
|
||||||
http://www.microsoft.com/downloads/details.aspx?familyid=9B2DA534-3E03-4391-8A4D-074B9F2BC1BF
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Other versions of OpenSSL (and versions for Windows older than Windows 2000) can be found on the following website.
|
|
||||||
|
|
||||||
Shining Light Productions
|
|
||||||
http://www.slproweb.com/products/Win32OpenSSL.html
|
|
||||||
Binary file not shown.
@@ -31,10 +31,15 @@
|
|||||||
# 0.0.1 - Initial release
|
# 0.0.1 - Initial release
|
||||||
# 0.0.2 - updated to distinguish it from earlier non-openssl version
|
# 0.0.2 - updated to distinguish it from earlier non-openssl version
|
||||||
# 0.0.3 - removed added psyco code as it is not supported under Calibre's Python 2.7
|
# 0.0.3 - removed added psyco code as it is not supported under Calibre's Python 2.7
|
||||||
|
# 0.0.4 - minor typos fixed
|
||||||
|
# 0.0.5 - updated to the new calibre plugin interface
|
||||||
|
|
||||||
import sys, os
|
import sys, os
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.ptempfile import PersistentTemporaryDirectory
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
from calibre_plugins.erdrpdb2pml import erdr2pml
|
||||||
|
|
||||||
class eRdrDeDRM(FileTypePlugin):
|
class eRdrDeDRM(FileTypePlugin):
|
||||||
name = 'eReader PDB 2 PML' # Name of the plugin
|
name = 'eReader PDB 2 PML' # Name of the plugin
|
||||||
@@ -42,16 +47,14 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
Credit given to The Dark Reverser for the original standalone script.'
|
Credit given to The Dark Reverser for the original standalone script.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
supported_platforms = ['linux', 'osx', 'windows'] # Platforms this plugin will run on
|
||||||
author = 'DiapDealer' # The author of this plugin
|
author = 'DiapDealer' # The author of this plugin
|
||||||
version = (0, 0, 3) # The version number of this plugin
|
version = (0, 0, 6) # The version number of this plugin
|
||||||
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
file_types = set(['pdb']) # The file types that this plugin will be applied to
|
||||||
on_import = True # Run this plugin during the import
|
on_import = True # Run this plugin during the import
|
||||||
|
minimum_calibre_version = (0, 7, 55)
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
def run(self, path_to_ebook):
|
||||||
from calibre.ptempfile import PersistentTemporaryDirectory
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
|
|
||||||
global bookname, erdr2pml
|
global bookname, erdr2pml
|
||||||
import erdr2pml
|
|
||||||
|
|
||||||
infile = path_to_ebook
|
infile = path_to_ebook
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
@@ -111,7 +114,7 @@ class eRdrDeDRM(FileTypePlugin):
|
|||||||
|
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
sect = erdr2pml.Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = erdr2pml.EreaderProcessor(sect.loadSection, name, cc)
|
er = erdr2pml.EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -57,8 +57,13 @@
|
|||||||
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
||||||
# 0.17 - added support for pycrypto's DES as well
|
# 0.17 - added support for pycrypto's DES as well
|
||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 0.19 - Modify the interface to allow use of import
|
||||||
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.18'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -70,32 +75,50 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
|
||||||
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
Des = None
|
Des = None
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
# first try with pycrypto
|
# first try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# they try with openssl
|
# they try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
else:
|
else:
|
||||||
# first try with openssl
|
# first try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# then try with pycrypto
|
# then try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
|
|
||||||
# if that did not work then use pure python implementation
|
# if that did not work then use pure python implementation
|
||||||
# of DES and try to speed it up with Psycho
|
# of DES and try to speed it up with Psycho
|
||||||
if Des == None:
|
if Des == None:
|
||||||
import python_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import python_des
|
||||||
|
else:
|
||||||
|
import python_des
|
||||||
Des = python_des.Des
|
Des = python_des.Des
|
||||||
# Import Psyco if available
|
# Import Psyco if available
|
||||||
try:
|
try:
|
||||||
@@ -111,19 +134,27 @@ except ImportError:
|
|||||||
# older Python release
|
# older Python release
|
||||||
import sha
|
import sha
|
||||||
sha1 = lambda s: sha.new(s)
|
sha1 = lambda s: sha.new(s)
|
||||||
|
|
||||||
import cgi
|
import cgi
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logging.basicConfig()
|
logging.basicConfig()
|
||||||
#logging.basicConfig(level=logging.DEBUG)
|
#logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
raise ValueError('Invalid file format')
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
||||||
@@ -160,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -197,11 +228,17 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
if (sect.bkType == "Book"):
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
||||||
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
||||||
@@ -217,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -245,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
encrypted_key = r[44:44+8]
|
if drm_sub_version == 13:
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key = r[44:44+8]
|
||||||
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -334,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -346,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -364,10 +409,10 @@ def cleanPML(pml):
|
|||||||
def convertEreaderToPml(infile, name, cc, outdir):
|
def convertEreaderToPml(infile, name, cc, outdir):
|
||||||
if not os.path.exists(outdir):
|
if not os.path.exists(outdir):
|
||||||
os.makedirs(outdir)
|
os.makedirs(outdir)
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -390,6 +435,47 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, name, cc, make_pmlz):
|
||||||
|
if make_pmlz :
|
||||||
|
# ignore specified outdir, use tempdir instead
|
||||||
|
outdir = tempfile.mkdtemp()
|
||||||
|
try:
|
||||||
|
print "Processing..."
|
||||||
|
convertEreaderToPml(infile, name, cc, outdir)
|
||||||
|
if make_pmlz :
|
||||||
|
import zipfile
|
||||||
|
import shutil
|
||||||
|
print " Creating PMLZ file"
|
||||||
|
zipname = infile[:-4] + '.pmlz'
|
||||||
|
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
||||||
|
list = os.listdir(outdir)
|
||||||
|
for file in list:
|
||||||
|
localname = file
|
||||||
|
filePath = os.path.join(outdir,file)
|
||||||
|
if os.path.isfile(filePath):
|
||||||
|
myZipFile.write(filePath, localname)
|
||||||
|
elif os.path.isdir(filePath):
|
||||||
|
imageList = os.listdir(filePath)
|
||||||
|
localimgdir = os.path.basename(filePath)
|
||||||
|
for image in imageList:
|
||||||
|
localname = os.path.join(localimgdir,image)
|
||||||
|
imagePath = os.path.join(filePath,image)
|
||||||
|
if os.path.isfile(imagePath):
|
||||||
|
myZipFile.write(imagePath, localname)
|
||||||
|
myZipFile.close()
|
||||||
|
# remove temporary directory
|
||||||
|
shutil.rmtree(outdir, True)
|
||||||
|
print 'output is %s' % zipname
|
||||||
|
else :
|
||||||
|
print 'output in %s' % outdir
|
||||||
|
print "done"
|
||||||
|
except ValueError, e:
|
||||||
|
print "Error: %s" % e
|
||||||
|
return 1
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
print "Converts DRMed eReader books to PML Source"
|
print "Converts DRMed eReader books to PML Source"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
@@ -404,8 +490,8 @@ def usage():
|
|||||||
print " It's enough to enter the last 8 digits of the credit card number"
|
print " It's enough to enter the last 8 digits of the credit card number"
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
global bookname
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -413,76 +499,29 @@ def main(argv=None):
|
|||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
make_pmlz = False
|
make_pmlz = False
|
||||||
zipname = None
|
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o == "-h":
|
if o == "-h":
|
||||||
usage()
|
usage()
|
||||||
return 0
|
return 0
|
||||||
elif o == "--make-pmlz":
|
elif o == "--make-pmlz":
|
||||||
make_pmlz = True
|
make_pmlz = True
|
||||||
zipname = ''
|
|
||||||
|
|
||||||
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
||||||
|
|
||||||
if len(args)!=3 and len(args)!=4:
|
if len(args)!=3 and len(args)!=4:
|
||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
else:
|
|
||||||
if len(args)==3:
|
|
||||||
infile, name, cc = args[0], args[1], args[2]
|
|
||||||
outdir = infile[:-4] + '_Source'
|
|
||||||
elif len(args)==4:
|
|
||||||
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
|
||||||
|
|
||||||
if make_pmlz :
|
if len(args)==3:
|
||||||
# ignore specified outdir, use tempdir instead
|
infile, name, cc = args[0], args[1], args[2]
|
||||||
outdir = tempfile.mkdtemp()
|
outdir = infile[:-4] + '_Source'
|
||||||
|
elif len(args)==4:
|
||||||
|
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
return decryptBook(infile, outdir, name, cc, make_pmlz)
|
||||||
|
|
||||||
try:
|
|
||||||
print "Processing..."
|
|
||||||
import time
|
|
||||||
start_time = time.time()
|
|
||||||
convertEreaderToPml(infile, name, cc, outdir)
|
|
||||||
|
|
||||||
if make_pmlz :
|
|
||||||
import zipfile
|
|
||||||
import shutil
|
|
||||||
print " Creating PMLZ file"
|
|
||||||
zipname = infile[:-4] + '.pmlz'
|
|
||||||
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
|
||||||
list = os.listdir(outdir)
|
|
||||||
for file in list:
|
|
||||||
localname = file
|
|
||||||
filePath = os.path.join(outdir,file)
|
|
||||||
if os.path.isfile(filePath):
|
|
||||||
myZipFile.write(filePath, localname)
|
|
||||||
elif os.path.isdir(filePath):
|
|
||||||
imageList = os.listdir(filePath)
|
|
||||||
localimgdir = os.path.basename(filePath)
|
|
||||||
for image in imageList:
|
|
||||||
localname = os.path.join(localimgdir,image)
|
|
||||||
imagePath = os.path.join(filePath,image)
|
|
||||||
if os.path.isfile(imagePath):
|
|
||||||
myZipFile.write(imagePath, localname)
|
|
||||||
myZipFile.close()
|
|
||||||
# remove temporary directory
|
|
||||||
shutil.rmtree(outdir, True)
|
|
||||||
|
|
||||||
end_time = time.time()
|
|
||||||
search_time = end_time - start_time
|
|
||||||
print 'elapsed time: %.2f seconds' % (search_time, )
|
|
||||||
if make_pmlz :
|
|
||||||
print 'output is %s' % zipname
|
|
||||||
else :
|
|
||||||
print 'output in %s' % outdir
|
|
||||||
print "done"
|
|
||||||
except ValueError, e:
|
|
||||||
print "Error: %s" % e
|
|
||||||
return 1
|
|
||||||
return 0
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
@@ -4,7 +4,7 @@
|
|||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
# later. <http://www.gnu.org/licenses/>
|
# later. <http://www.gnu.org/licenses/>
|
||||||
#
|
#
|
||||||
# Requires Calibre version 0.6.44 or higher.
|
# Requires Calibre version 0.7.55 or higher.
|
||||||
#
|
#
|
||||||
# All credit given to I <3 Cabbages for the original standalone scripts.
|
# All credit given to I <3 Cabbages for the original standalone scripts.
|
||||||
# I had the much easier job of converting them to Calibre a plugin.
|
# I had the much easier job of converting them to Calibre a plugin.
|
||||||
@@ -46,6 +46,9 @@
|
|||||||
# - Incorporated SomeUpdates zipfix routine.
|
# - Incorporated SomeUpdates zipfix routine.
|
||||||
# 0.1.2 - bug fix for non-ascii file names in encryption.xml
|
# 0.1.2 - bug fix for non-ascii file names in encryption.xml
|
||||||
# 0.1.3 - Try PyCrypto on Windows first
|
# 0.1.3 - Try PyCrypto on Windows first
|
||||||
|
# 0.1.4 - update zipfix to deal with mimetype not in correct place
|
||||||
|
# 0.1.5 - update zipfix to deal with completely missing mimetype files
|
||||||
|
# 0.1.6 - update ot the new calibre plugin interface
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypt Barnes & Noble ADEPT encrypted EPUB books.
|
Decrypt Barnes & Noble ADEPT encrypted EPUB books.
|
||||||
@@ -264,6 +267,7 @@ def plugin_main(userkey, inpath, outpath):
|
|||||||
return 0
|
return 0
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
|
||||||
class IgnobleDeDRM(FileTypePlugin):
|
class IgnobleDeDRM(FileTypePlugin):
|
||||||
name = 'Ignoble Epub DeDRM'
|
name = 'Ignoble Epub DeDRM'
|
||||||
@@ -271,8 +275,8 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows']
|
supported_platforms = ['linux', 'osx', 'windows']
|
||||||
author = 'DiapDealer'
|
author = 'DiapDealer'
|
||||||
version = (0, 1, 3)
|
version = (0, 1, 6)
|
||||||
minimum_calibre_version = (0, 6, 44) # Compiled python libraries cannot be imported in earlier versions.
|
minimum_calibre_version = (0, 7, 55) # Compiled python libraries cannot be imported in earlier versions.
|
||||||
file_types = set(['epub'])
|
file_types = set(['epub'])
|
||||||
on_import = True
|
on_import = True
|
||||||
|
|
||||||
@@ -280,10 +284,6 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
global AES
|
global AES
|
||||||
global AES2
|
global AES2
|
||||||
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
|
|
||||||
AES, AES2 = _load_crypto()
|
AES, AES2 = _load_crypto()
|
||||||
|
|
||||||
if AES == None or AES2 == None:
|
if AES == None or AES2 == None:
|
||||||
@@ -339,7 +339,7 @@ class IgnobleDeDRM(FileTypePlugin):
|
|||||||
for userkey in userkeys:
|
for userkey in userkeys:
|
||||||
# Create a TemporaryPersistent file to work with.
|
# Create a TemporaryPersistent file to work with.
|
||||||
# Check original epub archive for zip errors.
|
# Check original epub archive for zip errors.
|
||||||
import zipfix
|
from calibre_plugins.ignobleepub import zipfix
|
||||||
inf = self.temporary_file('.epub')
|
inf = self.temporary_file('.epub')
|
||||||
try:
|
try:
|
||||||
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
||||||
|
|||||||
@@ -13,9 +13,20 @@ _FILENAME_LEN_OFFSET = 26
|
|||||||
_EXTRA_LEN_OFFSET = 28
|
_EXTRA_LEN_OFFSET = 28
|
||||||
_FILENAME_OFFSET = 30
|
_FILENAME_OFFSET = 30
|
||||||
_MAX_SIZE = 64 * 1024
|
_MAX_SIZE = 64 * 1024
|
||||||
|
_MIMETYPE = 'application/epub+zip'
|
||||||
|
|
||||||
|
class ZipInfo(zipfile.ZipInfo):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
if 'compress_type' in kwargs:
|
||||||
|
compress_type = kwargs.pop('compress_type')
|
||||||
|
super(ZipInfo, self).__init__(*args, **kwargs)
|
||||||
|
self.compress_type = compress_type
|
||||||
|
|
||||||
class fixZip:
|
class fixZip:
|
||||||
def __init__(self, zinput, zoutput):
|
def __init__(self, zinput, zoutput):
|
||||||
|
self.ztype = 'zip'
|
||||||
|
if zinput.lower().find('.epub') >= 0 :
|
||||||
|
self.ztype = 'epub'
|
||||||
self.inzip = zipfile.ZipFile(zinput,'r')
|
self.inzip = zipfile.ZipFile(zinput,'r')
|
||||||
self.outzip = zipfile.ZipFile(zoutput,'w')
|
self.outzip = zipfile.ZipFile(zoutput,'w')
|
||||||
# open the input zip for reading only as a raw file
|
# open the input zip for reading only as a raw file
|
||||||
@@ -82,22 +93,28 @@ class fixZip:
|
|||||||
# and copy member over to output archive
|
# and copy member over to output archive
|
||||||
# if problems exist with local vs central filename, fix them
|
# if problems exist with local vs central filename, fix them
|
||||||
|
|
||||||
for i, zinfo in enumerate(self.inzip.infolist()):
|
# if epub write mimetype file first, with no compression
|
||||||
data = None
|
if self.ztype == 'epub':
|
||||||
nzinfo = zinfo
|
nzinfo = ZipInfo('mimetype', compress_type=zipfile.ZIP_STORED)
|
||||||
|
self.outzip.writestr(nzinfo, _MIMETYPE)
|
||||||
|
|
||||||
try:
|
# write the rest of the files
|
||||||
data = self.inzip.read(zinfo)
|
for zinfo in self.inzip.infolist():
|
||||||
except zipfile.BadZipfile or zipfile.error:
|
if zinfo.filename != "mimetype" or self.ztype == '.zip':
|
||||||
local_name = self.getlocalname(zinfo)
|
data = None
|
||||||
data = self.getfiledata(zinfo)
|
nzinfo = zinfo
|
||||||
nzinfo.filename = local_name
|
try:
|
||||||
|
data = self.inzip.read(zinfo.filename)
|
||||||
|
except zipfile.BadZipfile or zipfile.error:
|
||||||
|
local_name = self.getlocalname(zinfo)
|
||||||
|
data = self.getfiledata(zinfo)
|
||||||
|
nzinfo.filename = local_name
|
||||||
|
|
||||||
nzinfo.date_time = zinfo.date_time
|
nzinfo.date_time = zinfo.date_time
|
||||||
nzinfo.compress_type = zinfo.compress_type
|
nzinfo.compress_type = zinfo.compress_type
|
||||||
nzinfo.flag_bits = 0
|
nzinfo.flag_bits = 0
|
||||||
nzinfo.internal_attr = 0
|
nzinfo.internal_attr = 0
|
||||||
self.outzip.writestr(nzinfo,data)
|
self.outzip.writestr(nzinfo,data)
|
||||||
|
|
||||||
self.bzf.close()
|
self.bzf.close()
|
||||||
self.inzip.close()
|
self.inzip.close()
|
||||||
@@ -111,14 +128,7 @@ def usage():
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def repairBook(infile, outfile):
|
||||||
if len(argv)!=3:
|
|
||||||
usage()
|
|
||||||
return 1
|
|
||||||
infile = None
|
|
||||||
outfile = None
|
|
||||||
infile = argv[1]
|
|
||||||
outfile = argv[2]
|
|
||||||
if not os.path.exists(infile):
|
if not os.path.exists(infile):
|
||||||
print "Error: Input Zip File does not exist"
|
print "Error: Input Zip File does not exist"
|
||||||
return 1
|
return 1
|
||||||
@@ -130,6 +140,16 @@ def main(argv=sys.argv):
|
|||||||
print "Error Occurred ", e
|
print "Error Occurred ", e
|
||||||
return 2
|
return 2
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
if len(argv)!=3:
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
infile = argv[1]
|
||||||
|
outfile = argv[2]
|
||||||
|
return repairBook(infile, outfile)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__' :
|
if __name__ == '__main__' :
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
@@ -4,7 +4,7 @@
|
|||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
# later. <http://www.gnu.org/licenses/>
|
# later. <http://www.gnu.org/licenses/>
|
||||||
#
|
#
|
||||||
# Requires Calibre version 0.6.44 or higher.
|
# Requires Calibre version 0.7.55 or higher.
|
||||||
#
|
#
|
||||||
# All credit given to I <3 Cabbages for the original standalone scripts.
|
# All credit given to I <3 Cabbages for the original standalone scripts.
|
||||||
# I had the much easier job of converting them to a Calibre plugin.
|
# I had the much easier job of converting them to a Calibre plugin.
|
||||||
@@ -47,7 +47,9 @@
|
|||||||
# result of Calibre changing to python 2.7.
|
# result of Calibre changing to python 2.7.
|
||||||
# 0.1.3 - bug fix for epubs with non-ascii chars in file names
|
# 0.1.3 - bug fix for epubs with non-ascii chars in file names
|
||||||
# 0.1.4 - default to try PyCrypto first on Windows
|
# 0.1.4 - default to try PyCrypto first on Windows
|
||||||
|
# 0.1.5 - update zipfix to handle out of position mimetypes
|
||||||
|
# 0.1.6 - update zipfix to handle completely missing mimetype files
|
||||||
|
# 0.1.7 - update to new calibre plugin interface
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypt Adobe ADEPT-encrypted EPUB books.
|
Decrypt Adobe ADEPT-encrypted EPUB books.
|
||||||
@@ -364,6 +366,7 @@ def plugin_main(userkey, inpath, outpath):
|
|||||||
return 0
|
return 0
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
|
||||||
class IneptDeDRM(FileTypePlugin):
|
class IneptDeDRM(FileTypePlugin):
|
||||||
name = 'Inept Epub DeDRM'
|
name = 'Inept Epub DeDRM'
|
||||||
@@ -371,8 +374,8 @@ class IneptDeDRM(FileTypePlugin):
|
|||||||
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows']
|
supported_platforms = ['linux', 'osx', 'windows']
|
||||||
author = 'DiapDealer'
|
author = 'DiapDealer'
|
||||||
version = (0, 1, 4)
|
version = (0, 1, 7)
|
||||||
minimum_calibre_version = (0, 6, 44) # Compiled python libraries cannot be imported in earlier versions.
|
minimum_calibre_version = (0, 7, 55) # Compiled python libraries cannot be imported in earlier versions.
|
||||||
file_types = set(['epub'])
|
file_types = set(['epub'])
|
||||||
on_import = True
|
on_import = True
|
||||||
priority = 100
|
priority = 100
|
||||||
@@ -381,10 +384,6 @@ class IneptDeDRM(FileTypePlugin):
|
|||||||
global AES
|
global AES
|
||||||
global RSA
|
global RSA
|
||||||
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
|
|
||||||
AES, RSA = _load_crypto()
|
AES, RSA = _load_crypto()
|
||||||
|
|
||||||
if AES == None or RSA == None:
|
if AES == None or RSA == None:
|
||||||
@@ -417,7 +416,7 @@ class IneptDeDRM(FileTypePlugin):
|
|||||||
# Calibre's configuration directory for future use.
|
# Calibre's configuration directory for future use.
|
||||||
if iswindows or isosx:
|
if iswindows or isosx:
|
||||||
# ADE key retrieval script included in respective OS folder.
|
# ADE key retrieval script included in respective OS folder.
|
||||||
from ade_key import retrieve_key
|
from calibre_plugins.ineptepub.ade_key import retrieve_key
|
||||||
try:
|
try:
|
||||||
keydata = retrieve_key()
|
keydata = retrieve_key()
|
||||||
userkeys.append(keydata)
|
userkeys.append(keydata)
|
||||||
@@ -438,7 +437,7 @@ class IneptDeDRM(FileTypePlugin):
|
|||||||
for userkey in userkeys:
|
for userkey in userkeys:
|
||||||
# Create a TemporaryPersistent file to work with.
|
# Create a TemporaryPersistent file to work with.
|
||||||
# Check original epub archive for zip errors.
|
# Check original epub archive for zip errors.
|
||||||
import zipfix
|
from calibre_plugins.ineptepub import zipfix
|
||||||
inf = self.temporary_file('.epub')
|
inf = self.temporary_file('.epub')
|
||||||
try:
|
try:
|
||||||
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
fr = zipfix.fixZip(path_to_ebook, inf.name)
|
||||||
@@ -13,9 +13,20 @@ _FILENAME_LEN_OFFSET = 26
|
|||||||
_EXTRA_LEN_OFFSET = 28
|
_EXTRA_LEN_OFFSET = 28
|
||||||
_FILENAME_OFFSET = 30
|
_FILENAME_OFFSET = 30
|
||||||
_MAX_SIZE = 64 * 1024
|
_MAX_SIZE = 64 * 1024
|
||||||
|
_MIMETYPE = 'application/epub+zip'
|
||||||
|
|
||||||
|
class ZipInfo(zipfile.ZipInfo):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
if 'compress_type' in kwargs:
|
||||||
|
compress_type = kwargs.pop('compress_type')
|
||||||
|
super(ZipInfo, self).__init__(*args, **kwargs)
|
||||||
|
self.compress_type = compress_type
|
||||||
|
|
||||||
class fixZip:
|
class fixZip:
|
||||||
def __init__(self, zinput, zoutput):
|
def __init__(self, zinput, zoutput):
|
||||||
|
self.ztype = 'zip'
|
||||||
|
if zinput.lower().find('.epub') >= 0 :
|
||||||
|
self.ztype = 'epub'
|
||||||
self.inzip = zipfile.ZipFile(zinput,'r')
|
self.inzip = zipfile.ZipFile(zinput,'r')
|
||||||
self.outzip = zipfile.ZipFile(zoutput,'w')
|
self.outzip = zipfile.ZipFile(zoutput,'w')
|
||||||
# open the input zip for reading only as a raw file
|
# open the input zip for reading only as a raw file
|
||||||
@@ -82,22 +93,28 @@ class fixZip:
|
|||||||
# and copy member over to output archive
|
# and copy member over to output archive
|
||||||
# if problems exist with local vs central filename, fix them
|
# if problems exist with local vs central filename, fix them
|
||||||
|
|
||||||
for i, zinfo in enumerate(self.inzip.infolist()):
|
# if epub write mimetype file first, with no compression
|
||||||
data = None
|
if self.ztype == 'epub':
|
||||||
nzinfo = zinfo
|
nzinfo = ZipInfo('mimetype', compress_type=zipfile.ZIP_STORED)
|
||||||
|
self.outzip.writestr(nzinfo, _MIMETYPE)
|
||||||
|
|
||||||
try:
|
# write the rest of the files
|
||||||
data = self.inzip.read(zinfo)
|
for zinfo in self.inzip.infolist():
|
||||||
except zipfile.BadZipfile or zipfile.error:
|
if zinfo.filename != "mimetype" or self.ztype == '.zip':
|
||||||
local_name = self.getlocalname(zinfo)
|
data = None
|
||||||
data = self.getfiledata(zinfo)
|
nzinfo = zinfo
|
||||||
nzinfo.filename = local_name
|
try:
|
||||||
|
data = self.inzip.read(zinfo.filename)
|
||||||
|
except zipfile.BadZipfile or zipfile.error:
|
||||||
|
local_name = self.getlocalname(zinfo)
|
||||||
|
data = self.getfiledata(zinfo)
|
||||||
|
nzinfo.filename = local_name
|
||||||
|
|
||||||
nzinfo.date_time = zinfo.date_time
|
nzinfo.date_time = zinfo.date_time
|
||||||
nzinfo.compress_type = zinfo.compress_type
|
nzinfo.compress_type = zinfo.compress_type
|
||||||
nzinfo.flag_bits = 0
|
nzinfo.flag_bits = 0
|
||||||
nzinfo.internal_attr = 0
|
nzinfo.internal_attr = 0
|
||||||
self.outzip.writestr(nzinfo,data)
|
self.outzip.writestr(nzinfo,data)
|
||||||
|
|
||||||
self.bzf.close()
|
self.bzf.close()
|
||||||
self.inzip.close()
|
self.inzip.close()
|
||||||
@@ -111,14 +128,7 @@ def usage():
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def repairBook(infile, outfile):
|
||||||
if len(argv)!=3:
|
|
||||||
usage()
|
|
||||||
return 1
|
|
||||||
infile = None
|
|
||||||
outfile = None
|
|
||||||
infile = argv[1]
|
|
||||||
outfile = argv[2]
|
|
||||||
if not os.path.exists(infile):
|
if not os.path.exists(infile):
|
||||||
print "Error: Input Zip File does not exist"
|
print "Error: Input Zip File does not exist"
|
||||||
return 1
|
return 1
|
||||||
@@ -130,6 +140,16 @@ def main(argv=sys.argv):
|
|||||||
print "Error Occurred ", e
|
print "Error Occurred ", e
|
||||||
return 2
|
return 2
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
if len(argv)!=3:
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
infile = argv[1]
|
||||||
|
outfile = argv[2]
|
||||||
|
return repairBook(infile, outfile)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__' :
|
if __name__ == '__main__' :
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
@@ -1,6 +1,6 @@
|
|||||||
#! /usr/bin/env python
|
#! /usr/bin/env python
|
||||||
|
|
||||||
# ineptpdf_plugin.py
|
# ineptpdf plugin __init__.py
|
||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
# later. <http://www.gnu.org/licenses/>
|
# later. <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
@@ -11,7 +11,7 @@
|
|||||||
# be able to read OUR books on whatever device we want and to keep
|
# be able to read OUR books on whatever device we want and to keep
|
||||||
# readable for a long, long time
|
# readable for a long, long time
|
||||||
|
|
||||||
# Requires Calibre version 0.6.44 or higher.
|
# Requires Calibre version 0.7.55 or higher.
|
||||||
#
|
#
|
||||||
# All credit given to I <3 Cabbages for the original standalone scripts.
|
# All credit given to I <3 Cabbages for the original standalone scripts.
|
||||||
# I had the much easier job of converting them to a Calibre plugin.
|
# I had the much easier job of converting them to a Calibre plugin.
|
||||||
@@ -47,7 +47,11 @@
|
|||||||
# ** NOTE ** There is no plugin customization data for the Inept PDF DeDRM plugin.
|
# ** NOTE ** There is no plugin customization data for the Inept PDF DeDRM plugin.
|
||||||
#
|
#
|
||||||
# Revision history:
|
# Revision history:
|
||||||
# 0.1 - Initial release
|
# 0.1 - Initial release
|
||||||
|
# 0.1.1 - back port ineptpdf 8.4.X support for increased number of encryption methods
|
||||||
|
# 0.1.2 - back port ineptpdf 8.4.X bug fixes
|
||||||
|
# 0.1.3 - add in fix for improper rejection of session bookkeys with len(bookkey) = length + 1
|
||||||
|
# 0.1.4 - update to the new calibre plugin interface
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypts Adobe ADEPT-encrypted PDF files.
|
Decrypts Adobe ADEPT-encrypted PDF files.
|
||||||
@@ -171,6 +175,7 @@ def _load_crypto_libcrypto():
|
|||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
|
MODE_CBC = 0
|
||||||
@classmethod
|
@classmethod
|
||||||
def new(cls, userkey, mode, iv):
|
def new(cls, userkey, mode, iv):
|
||||||
self = AES()
|
self = AES()
|
||||||
@@ -1544,16 +1549,30 @@ class PDFDocument(object):
|
|||||||
bookkey = bookkey[index:]
|
bookkey = bookkey[index:]
|
||||||
ebx_V = int_value(param.get('V', 4))
|
ebx_V = int_value(param.get('V', 4))
|
||||||
ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
|
ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
|
||||||
# added because of the booktype / decryption book session key error
|
# added because of improper booktype / decryption book session key errors
|
||||||
if ebx_V == 3:
|
if length > 0:
|
||||||
V = 3
|
if len(bookkey) == length:
|
||||||
elif ebx_V < 4 or ebx_type < 6:
|
if ebx_V == 3:
|
||||||
V = ord(bookkey[0])
|
V = 3
|
||||||
bookkey = bookkey[1:]
|
else:
|
||||||
|
V = 2
|
||||||
|
elif len(bookkey) == length + 1:
|
||||||
|
V = ord(bookkey[0])
|
||||||
|
bookkey = bookkey[1:]
|
||||||
|
else:
|
||||||
|
print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
|
||||||
|
print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
|
||||||
|
print "bookkey[0] is %d" % ord(bookkey[0])
|
||||||
|
raise ADEPTError('error decrypting book session key - mismatched length')
|
||||||
else:
|
else:
|
||||||
V = 2
|
# proper length unknown try with whatever you have
|
||||||
if length and len(bookkey) != length:
|
print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
|
||||||
raise ADEPTError('error decrypting book session key')
|
print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
|
||||||
|
print "bookkey[0] is %d" % ord(bookkey[0])
|
||||||
|
if ebx_V == 3:
|
||||||
|
V = 3
|
||||||
|
else:
|
||||||
|
V = 2
|
||||||
self.decrypt_key = bookkey
|
self.decrypt_key = bookkey
|
||||||
self.genkey = self.genkey_v3 if V == 3 else self.genkey_v2
|
self.genkey = self.genkey_v3 if V == 3 else self.genkey_v2
|
||||||
self.decipher = self.decrypt_rc4
|
self.decipher = self.decrypt_rc4
|
||||||
@@ -2109,6 +2128,7 @@ def plugin_main(keypath, inpath, outpath):
|
|||||||
|
|
||||||
|
|
||||||
from calibre.customize import FileTypePlugin
|
from calibre.customize import FileTypePlugin
|
||||||
|
from calibre.constants import iswindows, isosx
|
||||||
|
|
||||||
class IneptPDFDeDRM(FileTypePlugin):
|
class IneptPDFDeDRM(FileTypePlugin):
|
||||||
name = 'Inept PDF DeDRM'
|
name = 'Inept PDF DeDRM'
|
||||||
@@ -2116,17 +2136,14 @@ class IneptPDFDeDRM(FileTypePlugin):
|
|||||||
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
Credit given to I <3 Cabbages for the original stand-alone scripts.'
|
||||||
supported_platforms = ['linux', 'osx', 'windows']
|
supported_platforms = ['linux', 'osx', 'windows']
|
||||||
author = 'DiapDealer'
|
author = 'DiapDealer'
|
||||||
version = (0, 1, 2)
|
version = (0, 1, 4)
|
||||||
minimum_calibre_version = (0, 6, 44) # Compiled python libraries cannot be imported in earlier versions.
|
minimum_calibre_version = (0, 7, 55) # for the new plugin interface
|
||||||
file_types = set(['pdf'])
|
file_types = set(['pdf'])
|
||||||
on_import = True
|
on_import = True
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
def run(self, path_to_ebook):
|
||||||
global ARC4, RSA, AES
|
global ARC4, RSA, AES
|
||||||
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.constants import iswindows, isosx
|
|
||||||
|
|
||||||
ARC4, RSA, AES = _load_crypto()
|
ARC4, RSA, AES = _load_crypto()
|
||||||
|
|
||||||
@@ -2160,7 +2177,7 @@ class IneptPDFDeDRM(FileTypePlugin):
|
|||||||
# Calibre's configuration directory for future use.
|
# Calibre's configuration directory for future use.
|
||||||
if iswindows or isosx:
|
if iswindows or isosx:
|
||||||
# ADE key retrieval script.
|
# ADE key retrieval script.
|
||||||
from ade_key import retrieve_key
|
from calibre_plugins.ineptpdf.ade_key import retrieve_key
|
||||||
try:
|
try:
|
||||||
keydata = retrieve_key()
|
keydata = retrieve_key()
|
||||||
userkeys.append(keydata)
|
userkeys.append(keydata)
|
||||||
Binary file not shown.
@@ -1,334 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
|
||||||
# for personal use for archiving and converting your ebooks
|
|
||||||
|
|
||||||
# PLEASE DO NOT PIRATE EBOOKS!
|
|
||||||
|
|
||||||
# We want all authors and publishers, and eBook stores to live
|
|
||||||
# long and prosperous lives but at the same time we just want to
|
|
||||||
# be able to read OUR books on whatever device we want and to keep
|
|
||||||
# readable for a long, long time
|
|
||||||
|
|
||||||
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
|
||||||
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
|
||||||
# and many many others
|
|
||||||
|
|
||||||
# It can run standalone to convert K4M/K4PC/Mobi files, or it can be installed as a
|
|
||||||
# plugin for Calibre (http://calibre-ebook.com/about) so that importing
|
|
||||||
# K4 or Mobi with DRM is no londer a multi-step process.
|
|
||||||
#
|
|
||||||
# ***NOTE*** If you are using this script as a calibre plugin for a K4M or K4PC ebook
|
|
||||||
# then calibre must be installed on the same machine and in the same account as K4PC or K4M
|
|
||||||
# for the plugin version to function properly.
|
|
||||||
#
|
|
||||||
# To create a Calibre plugin, rename this file so that the filename
|
|
||||||
# ends in '_plugin.py', put it into a ZIP file with all its supporting python routines
|
|
||||||
# and import that ZIP into Calibre using its plugin configuration GUI.
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__version__ = '1.9'
|
|
||||||
|
|
||||||
class Unbuffered:
|
|
||||||
def __init__(self, stream):
|
|
||||||
self.stream = stream
|
|
||||||
def write(self, data):
|
|
||||||
self.stream.write(data)
|
|
||||||
self.stream.flush()
|
|
||||||
def __getattr__(self, attr):
|
|
||||||
return getattr(self.stream, attr)
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os, csv, getopt
|
|
||||||
import binascii
|
|
||||||
import zlib
|
|
||||||
import re
|
|
||||||
import zlib, zipfile, tempfile, shutil
|
|
||||||
from struct import pack, unpack, unpack_from
|
|
||||||
|
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
if 'calibre' in sys.modules:
|
|
||||||
inCalibre = True
|
|
||||||
else:
|
|
||||||
inCalibre = False
|
|
||||||
|
|
||||||
def zipUpDir(myzip, tempdir,localname):
|
|
||||||
currentdir = tempdir
|
|
||||||
if localname != "":
|
|
||||||
currentdir = os.path.join(currentdir,localname)
|
|
||||||
list = os.listdir(currentdir)
|
|
||||||
for file in list:
|
|
||||||
afilename = file
|
|
||||||
localfilePath = os.path.join(localname, afilename)
|
|
||||||
realfilePath = os.path.join(currentdir,file)
|
|
||||||
if os.path.isfile(realfilePath):
|
|
||||||
myzip.write(realfilePath, localfilePath)
|
|
||||||
elif os.path.isdir(realfilePath):
|
|
||||||
zipUpDir(myzip, tempdir, localfilePath)
|
|
||||||
|
|
||||||
def usage(progname):
|
|
||||||
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
|
||||||
print "Usage:"
|
|
||||||
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
|
||||||
|
|
||||||
#
|
|
||||||
# Main
|
|
||||||
#
|
|
||||||
def main(argv=sys.argv):
|
|
||||||
import mobidedrm
|
|
||||||
import topazextract
|
|
||||||
import kgenpids
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
|
|
||||||
k4 = False
|
|
||||||
kInfoFiles = []
|
|
||||||
serials = []
|
|
||||||
pids = []
|
|
||||||
|
|
||||||
print ('K4MobiDeDrm v%(__version__)s '
|
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
|
||||||
|
|
||||||
print ' '
|
|
||||||
try:
|
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
|
||||||
except getopt.GetoptError, err:
|
|
||||||
print str(err)
|
|
||||||
usage(progname)
|
|
||||||
sys.exit(2)
|
|
||||||
if len(args)<2:
|
|
||||||
usage(progname)
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
for o, a in opts:
|
|
||||||
if o == "-k":
|
|
||||||
if a == None :
|
|
||||||
raise DrmException("Invalid parameter for -k")
|
|
||||||
kInfoFiles.append(a)
|
|
||||||
if o == "-p":
|
|
||||||
if a == None :
|
|
||||||
raise DrmException("Invalid parameter for -p")
|
|
||||||
pids = a.split(',')
|
|
||||||
if o == "-s":
|
|
||||||
if a == None :
|
|
||||||
raise DrmException("Invalid parameter for -s")
|
|
||||||
serials = a.split(',')
|
|
||||||
|
|
||||||
# try with built in Kindle Info files
|
|
||||||
k4 = True
|
|
||||||
|
|
||||||
infile = args[0]
|
|
||||||
outdir = args[1]
|
|
||||||
|
|
||||||
# handle the obvious cases at the beginning
|
|
||||||
if not os.path.isfile(infile):
|
|
||||||
print "Error: Input file does not exist"
|
|
||||||
return 1
|
|
||||||
|
|
||||||
mobi = True
|
|
||||||
magic3 = file(infile,'rb').read(3)
|
|
||||||
if magic3 == 'TPZ':
|
|
||||||
mobi = False
|
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
mb = mobidedrm.MobiBook(infile)
|
|
||||||
else:
|
|
||||||
tempdir = tempfile.mkdtemp()
|
|
||||||
mb = topazextract.TopazBook(infile, tempdir)
|
|
||||||
|
|
||||||
title = mb.getBookTitle()
|
|
||||||
print "Processing Book: ", title
|
|
||||||
|
|
||||||
# build pid list
|
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
|
||||||
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if mobi:
|
|
||||||
unlocked_file = mb.processBook(pidlst)
|
|
||||||
else:
|
|
||||||
mb.processBook(pidlst)
|
|
||||||
|
|
||||||
except mobidedrm.DrmException, e:
|
|
||||||
print " ... not suceessful " + str(e) + "\n"
|
|
||||||
return 1
|
|
||||||
except topazextract.TpzDRMError, e:
|
|
||||||
print str(e)
|
|
||||||
print " Creating DeBug Full Zip Archive of Book"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_debug' + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
zipUpDir(myzip, tempdir, '')
|
|
||||||
myzip.close()
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 1
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
outfile = os.path.join(outdir,bookname + '_nodrm' + '.azw')
|
|
||||||
file(outfile, 'wb').write(unlocked_file)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# topaz: build up zip archives of results
|
|
||||||
print " Creating HTML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_nodrm' + '.zip')
|
|
||||||
myzip1 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.html'),'book.html')
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip1.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip1.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip1, tempdir, 'img')
|
|
||||||
myzip1.close()
|
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
|
||||||
myzip2 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip2.write(os.path.join(tempdir,'index_svg.xhtml'),'index_svg.xhtml')
|
|
||||||
zipUpDir(myzip2, tempdir, 'svg')
|
|
||||||
zipUpDir(myzip2, tempdir, 'img')
|
|
||||||
myzip2.close()
|
|
||||||
|
|
||||||
print " Creating XML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
|
||||||
myzip3 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
targetdir = os.path.join(tempdir,'xml')
|
|
||||||
zipUpDir(myzip3, targetdir, '')
|
|
||||||
zipUpDir(myzip3, tempdir, 'img')
|
|
||||||
myzip3.close()
|
|
||||||
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
sys.exit(main())
|
|
||||||
|
|
||||||
if not __name__ == "__main__" and inCalibre:
|
|
||||||
from calibre.customize import FileTypePlugin
|
|
||||||
|
|
||||||
class K4DeDRM(FileTypePlugin):
|
|
||||||
name = 'K4PC, K4Mac, Mobi DeDRM' # Name of the plugin
|
|
||||||
description = 'Removes DRM from K4PC and Mac, Kindle Mobi and Topaz files. \
|
|
||||||
Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
|
||||||
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
|
||||||
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
|
||||||
version = (0, 1, 9) # The version number of this plugin
|
|
||||||
file_types = set(['prc','mobi','azw','azw1','tpz']) # The file types that this plugin will be applied to
|
|
||||||
on_import = True # Run this plugin during the import
|
|
||||||
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.ptempfile import PersistentTemporaryDirectory
|
|
||||||
|
|
||||||
import kgenpids
|
|
||||||
import zlib
|
|
||||||
import zipfile
|
|
||||||
import topazextract
|
|
||||||
import mobidedrm
|
|
||||||
|
|
||||||
k4 = True
|
|
||||||
pids = []
|
|
||||||
serials = []
|
|
||||||
kInfoFiles = []
|
|
||||||
|
|
||||||
# Get supplied list of PIDs to try from plugin customization.
|
|
||||||
customvalues = self.site_customization.split(',')
|
|
||||||
for customvalue in customvalues:
|
|
||||||
customvalue = str(customvalue)
|
|
||||||
customvalue = customvalue.strip()
|
|
||||||
if len(customvalue) == 10 or len(customvalue) == 8:
|
|
||||||
pids.append(customvalue)
|
|
||||||
else :
|
|
||||||
if len(customvalue) == 16 and customvalue[0] == 'B':
|
|
||||||
serials.append(customvalue)
|
|
||||||
else:
|
|
||||||
print "%s is not a valid Kindle serial number or PID." % str(customvalue)
|
|
||||||
|
|
||||||
# Load any kindle info files (*.info) included Calibre's config directory.
|
|
||||||
try:
|
|
||||||
# Find Calibre's configuration directory.
|
|
||||||
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
|
||||||
print 'K4MobiDeDRM: Calibre configuration directory = %s' % confpath
|
|
||||||
files = os.listdir(confpath)
|
|
||||||
filefilter = re.compile("\.info$", re.IGNORECASE)
|
|
||||||
files = filter(filefilter.search, files)
|
|
||||||
|
|
||||||
if files:
|
|
||||||
for filename in files:
|
|
||||||
fpath = os.path.join(confpath, filename)
|
|
||||||
kInfoFiles.append(fpath)
|
|
||||||
print 'K4MobiDeDRM: Kindle info file %s found in config folder.' % filename
|
|
||||||
except IOError:
|
|
||||||
print 'K4MobiDeDRM: Error reading kindle info files from config directory.'
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
mobi = True
|
|
||||||
magic3 = file(path_to_ebook,'rb').read(3)
|
|
||||||
if magic3 == 'TPZ':
|
|
||||||
mobi = False
|
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(path_to_ebook))[0]
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
mb = mobidedrm.MobiBook(path_to_ebook)
|
|
||||||
else:
|
|
||||||
tempdir = PersistentTemporaryDirectory()
|
|
||||||
mb = topazextract.TopazBook(path_to_ebook, tempdir)
|
|
||||||
|
|
||||||
title = mb.getBookTitle()
|
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
|
||||||
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if mobi:
|
|
||||||
unlocked_file = mb.processBook(pidlst)
|
|
||||||
else:
|
|
||||||
mb.processBook(pidlst)
|
|
||||||
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
#if you reached here then no luck raise and exception
|
|
||||||
if is_ok_to_use_qt():
|
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
|
||||||
d.show()
|
|
||||||
d.raise_()
|
|
||||||
d.exec_()
|
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
|
||||||
return ""
|
|
||||||
except topazextract.TpzDRMError:
|
|
||||||
#if you reached here then no luck raise and exception
|
|
||||||
if is_ok_to_use_qt():
|
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
|
||||||
d.show()
|
|
||||||
d.raise_()
|
|
||||||
d.exec_()
|
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
|
||||||
return ""
|
|
||||||
|
|
||||||
print "Success!"
|
|
||||||
if mobi:
|
|
||||||
of = self.temporary_file(bookname+'.mobi')
|
|
||||||
of.write(unlocked_file)
|
|
||||||
of.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
# topaz: build up zip archives of results
|
|
||||||
print " Creating HTML ZIP Archive"
|
|
||||||
of = self.temporary_file(bookname + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(of.name,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip.write(os.path.join(tempdir,'book.html'),'book.html')
|
|
||||||
myzip.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip, tempdir, 'img')
|
|
||||||
myzip.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
def customization_help(self, gui=False):
|
|
||||||
return 'Enter 10 character PIDs and/or Kindle serial numbers, separated by commas.'
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,110 +1,843 @@
|
|||||||
# K4PC Windows specific routines
|
#! /usr/bin/python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
# For use with Topaz Scripts Version 2.6
|
||||||
|
|
||||||
from __future__ import with_statement
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys, os
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
import csv
|
||||||
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
import os
|
||||||
string_at, Structure, c_void_p, cast
|
import getopt
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
|
||||||
import _winreg as winreg
|
class TpzDRMError(Exception):
|
||||||
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
MAX_PATH = 255
|
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
|
||||||
advapi32 = windll.advapi32
|
|
||||||
crypt32 = windll.crypt32
|
|
||||||
|
|
||||||
|
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
|
||||||
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
|
||||||
|
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
# Get a 7 bit encoded number from string. The most
|
||||||
|
# significant byte comes first and has the high bit (8th) set
|
||||||
|
|
||||||
class DataBlob(Structure):
|
def readEncodedNumber(file):
|
||||||
_fields_ = [('cbData', c_uint),
|
flag = False
|
||||||
('pbData', c_void_p)]
|
c = file.read(1)
|
||||||
DataBlob_p = POINTER(DataBlob)
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
|
||||||
|
if data == 0xFF:
|
||||||
|
flag = True
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
|
||||||
|
if data >= 0x80:
|
||||||
|
datax = (data & 0x7F)
|
||||||
|
while data >= 0x80 :
|
||||||
|
c = file.read(1)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
data = ord(c)
|
||||||
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
|
data = datax
|
||||||
|
|
||||||
|
if flag:
|
||||||
|
data = -data
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
def GetSystemDirectory():
|
# returns a binary string that encodes a number into 7 bits
|
||||||
GetSystemDirectoryW = kernel32.GetSystemDirectoryW
|
# most significant byte first which has the high bit set
|
||||||
GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
|
|
||||||
GetSystemDirectoryW.restype = c_uint
|
|
||||||
def GetSystemDirectory():
|
|
||||||
buffer = create_unicode_buffer(MAX_PATH + 1)
|
|
||||||
GetSystemDirectoryW(buffer, len(buffer))
|
|
||||||
return buffer.value
|
|
||||||
return GetSystemDirectory
|
|
||||||
GetSystemDirectory = GetSystemDirectory()
|
|
||||||
|
|
||||||
def GetVolumeSerialNumber():
|
def encodeNumber(number):
|
||||||
GetVolumeInformationW = kernel32.GetVolumeInformationW
|
result = ""
|
||||||
GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
|
negative = False
|
||||||
POINTER(c_uint), POINTER(c_uint),
|
flag = 0
|
||||||
POINTER(c_uint), c_wchar_p, c_uint]
|
|
||||||
GetVolumeInformationW.restype = c_uint
|
if number < 0 :
|
||||||
def GetVolumeSerialNumber(path = GetSystemDirectory().split('\\')[0] + '\\'):
|
number = -number + 1
|
||||||
vsn = c_uint(0)
|
negative = True
|
||||||
GetVolumeInformationW(path, None, 0, byref(vsn), None, None, None, 0)
|
|
||||||
return str(vsn.value)
|
while True:
|
||||||
return GetVolumeSerialNumber
|
byte = number & 0x7F
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
number = number >> 7
|
||||||
|
byte += flag
|
||||||
|
result += chr(byte)
|
||||||
|
flag = 0x80
|
||||||
|
if number == 0 :
|
||||||
|
if (byte == 0xFF and negative == False) :
|
||||||
|
result += chr(0x80)
|
||||||
|
break
|
||||||
|
|
||||||
|
if negative:
|
||||||
|
result += chr(0xFF)
|
||||||
|
|
||||||
|
return result[::-1]
|
||||||
|
|
||||||
|
|
||||||
def GetUserName():
|
|
||||||
GetUserNameW = advapi32.GetUserNameW
|
# create / read a length prefixed string from the file
|
||||||
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
|
||||||
GetUserNameW.restype = c_uint
|
def lengthPrefixString(data):
|
||||||
def GetUserName():
|
return encodeNumber(len(data))+data
|
||||||
buffer = create_unicode_buffer(32)
|
|
||||||
size = c_uint(len(buffer))
|
def readString(file):
|
||||||
while not GetUserNameW(buffer, byref(size)):
|
stringLength = readEncodedNumber(file)
|
||||||
buffer = create_unicode_buffer(len(buffer) * 2)
|
if (stringLength == None):
|
||||||
size.value = len(buffer)
|
return ""
|
||||||
return buffer.value.encode('utf-16-le')[::2]
|
sv = file.read(stringLength)
|
||||||
return GetUserName
|
if (len(sv) != stringLength):
|
||||||
GetUserName = GetUserName()
|
return ""
|
||||||
|
return unpack(str(stringLength)+"s",sv)[0]
|
||||||
|
|
||||||
|
|
||||||
def CryptUnprotectData():
|
# convert a binary string generated by encodeNumber (7 bit encoded number)
|
||||||
_CryptUnprotectData = crypt32.CryptUnprotectData
|
# to the value you would find inside the page*.dat files to be processed
|
||||||
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
|
||||||
c_void_p, c_void_p, c_uint, DataBlob_p]
|
def convert(i):
|
||||||
_CryptUnprotectData.restype = c_uint
|
result = ''
|
||||||
def CryptUnprotectData(indata, entropy):
|
val = encodeNumber(i)
|
||||||
indatab = create_string_buffer(indata)
|
for j in xrange(len(val)):
|
||||||
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
c = ord(val[j:j+1])
|
||||||
entropyb = create_string_buffer(entropy)
|
result += '%02x' % c
|
||||||
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
return result
|
||||||
outdata = DataBlob()
|
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
|
||||||
None, None, 0, byref(outdata)):
|
|
||||||
raise DrmException("Failed to Unprotect Data")
|
# the complete string table used to store all book text content
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
# as well as the xml tokens and values that make sense out of it
|
||||||
return CryptUnprotectData
|
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
class Dictionary(object):
|
||||||
|
def __init__(self, dictFile):
|
||||||
|
self.filename = dictFile
|
||||||
|
self.size = 0
|
||||||
|
self.fo = file(dictFile,'rb')
|
||||||
|
self.stable = []
|
||||||
|
self.size = readEncodedNumber(self.fo)
|
||||||
|
for i in xrange(self.size):
|
||||||
|
self.stable.append(self.escapestr(readString(self.fo)))
|
||||||
|
self.pos = 0
|
||||||
|
|
||||||
|
def escapestr(self, str):
|
||||||
|
str = str.replace('&','&')
|
||||||
|
str = str.replace('<','<')
|
||||||
|
str = str.replace('>','>')
|
||||||
|
str = str.replace('=','=')
|
||||||
|
return str
|
||||||
|
|
||||||
|
def lookup(self,val):
|
||||||
|
if ((val >= 0) and (val < self.size)) :
|
||||||
|
self.pos = val
|
||||||
|
return self.stable[self.pos]
|
||||||
|
else:
|
||||||
|
print "Error - %d outside of string table limits" % val
|
||||||
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
|
def getSize(self):
|
||||||
|
return self.size
|
||||||
|
|
||||||
|
def getPos(self):
|
||||||
|
return self.pos
|
||||||
|
|
||||||
|
def dumpDict(self):
|
||||||
|
for i in xrange(self.size):
|
||||||
|
print "%d %s %s" % (i, convert(i), self.stable[i])
|
||||||
|
return
|
||||||
|
|
||||||
|
# parses the xml snippets that are represented by each page*.dat file.
|
||||||
|
# also parses the other0.dat file - the main stylesheet
|
||||||
|
# and information used to inject the xml snippets into page*.dat files
|
||||||
|
|
||||||
|
class PageParser(object):
|
||||||
|
def __init__(self, filename, dict, debug, flat_xml):
|
||||||
|
self.fo = file(filename,'rb')
|
||||||
|
self.id = os.path.basename(filename).replace('.dat','')
|
||||||
|
self.dict = dict
|
||||||
|
self.debug = debug
|
||||||
|
self.flat_xml = flat_xml
|
||||||
|
self.tagpath = []
|
||||||
|
self.doc = []
|
||||||
|
self.snippetList = []
|
||||||
|
|
||||||
|
|
||||||
|
# hash table used to enable the decoding process
|
||||||
|
# This has all been developed by trial and error so it may still have omissions or
|
||||||
|
# contain errors
|
||||||
|
# Format:
|
||||||
|
# tag : (number of arguments, argument type, subtags present, special case of subtags presents when escaped)
|
||||||
|
|
||||||
|
token_tags = {
|
||||||
|
'x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'rootID' : (1, 'scalar_number', 0, 0),
|
||||||
|
'stemID' : (1, 'scalar_number', 0, 0),
|
||||||
|
'type' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'info' : (0, 'number', 1, 0),
|
||||||
|
|
||||||
|
'info.word' : (0, 'number', 1, 1),
|
||||||
|
'info.word.ocrText' : (1, 'text', 0, 0),
|
||||||
|
'info.word.firstGlyph' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.lastGlyph' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.bl' : (1, 'raw', 0, 0),
|
||||||
|
'info.word.link_id' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'glyph' : (0, 'number', 1, 1),
|
||||||
|
'glyph.x' : (1, 'number', 0, 0),
|
||||||
|
'glyph.y' : (1, 'number', 0, 0),
|
||||||
|
'glyph.glyphID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'dehyphen' : (0, 'number', 1, 1),
|
||||||
|
'dehyphen.rootID' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.stemID' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.stemPage' : (1, 'number', 0, 0),
|
||||||
|
'dehyphen.sh' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'links' : (0, 'number', 1, 1),
|
||||||
|
'links.page' : (1, 'number', 0, 0),
|
||||||
|
'links.rel' : (1, 'number', 0, 0),
|
||||||
|
'links.row' : (1, 'number', 0, 0),
|
||||||
|
'links.title' : (1, 'text', 0, 0),
|
||||||
|
'links.href' : (1, 'text', 0, 0),
|
||||||
|
'links.type' : (1, 'text', 0, 0),
|
||||||
|
|
||||||
|
'paraCont' : (0, 'number', 1, 1),
|
||||||
|
'paraCont.rootID' : (1, 'number', 0, 0),
|
||||||
|
'paraCont.stemID' : (1, 'number', 0, 0),
|
||||||
|
'paraCont.stemPage' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'paraStems' : (0, 'number', 1, 1),
|
||||||
|
'paraStems.stemID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'wordStems' : (0, 'number', 1, 1),
|
||||||
|
'wordStems.stemID' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'empty' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
|
'page' : (1, 'snippets', 1, 0),
|
||||||
|
'page.pageid' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.pagelabel' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'page.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'page.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'page.startID' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'group' : (1, 'snippets', 1, 0),
|
||||||
|
'group.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'group._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'region' : (1, 'snippets', 1, 0),
|
||||||
|
'region.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'region.x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'region.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'empty_text_region' : (1, 'snippets', 1, 0),
|
||||||
|
|
||||||
|
'img' : (1, 'snippets', 1, 0),
|
||||||
|
'img.x' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.y' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.h' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.w' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.src' : (1, 'scalar_number', 0, 0),
|
||||||
|
'img.color_src' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'paragraph' : (1, 'snippets', 1, 0),
|
||||||
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word_semantic.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'word_semantic.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'word' : (1, 'snippets', 1, 0),
|
||||||
|
'word.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'word.firstGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
'word.lastGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'span' : (1, 'snippets', 1, 0),
|
||||||
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'extratokens.firstGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
'extratokens.lastGlyph' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
'glyph.h' : (1, 'number', 0, 0),
|
||||||
|
'glyph.w' : (1, 'number', 0, 0),
|
||||||
|
'glyph.use' : (1, 'number', 0, 0),
|
||||||
|
'glyph.vtx' : (1, 'number', 0, 1),
|
||||||
|
'glyph.len' : (1, 'number', 0, 1),
|
||||||
|
'glyph.dpi' : (1, 'number', 0, 0),
|
||||||
|
'vtx' : (0, 'number', 1, 1),
|
||||||
|
'vtx.x' : (1, 'number', 0, 0),
|
||||||
|
'vtx.y' : (1, 'number', 0, 0),
|
||||||
|
'len' : (0, 'number', 1, 1),
|
||||||
|
'len.n' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
'book' : (1, 'snippets', 1, 0),
|
||||||
|
'version' : (1, 'snippets', 1, 0),
|
||||||
|
'version.FlowEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.FlowEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Schema_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Schema_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.Topaz_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.WordDetailEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.WordDetailEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.ZoneEdit_1_id' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.ZoneEdit_1_version' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.chapterheaders' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.creation_date' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.header_footer' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.init_from_ocr' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.letter_insertion' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_convert' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_reflow' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.xmlinj_transform' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.findlists' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.page_num' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.page_type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.bad_text' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.glyph_mismatch' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.margins' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.staggered_lines' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.paragraph_continuation' : (1, 'scalar_text', 0, 0),
|
||||||
|
'version.toc' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'stylesheet' : (1, 'snippets', 1, 0),
|
||||||
|
'style' : (1, 'snippets', 1, 0),
|
||||||
|
'style._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style._parent_type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style.class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'style._after_class' : (1, 'scalar_text', 0, 0),
|
||||||
|
'rule' : (1, 'snippets', 1, 0),
|
||||||
|
'rule.attr' : (1, 'scalar_text', 0, 0),
|
||||||
|
'rule.value' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
|
'original' : (0, 'number', 1, 1),
|
||||||
|
'original.pnum' : (1, 'number', 0, 0),
|
||||||
|
'original.pid' : (1, 'text', 0, 0),
|
||||||
|
'pages' : (0, 'number', 1, 1),
|
||||||
|
'pages.ref' : (1, 'number', 0, 0),
|
||||||
|
'pages.id' : (1, 'number', 0, 0),
|
||||||
|
'startID' : (0, 'number', 1, 1),
|
||||||
|
'startID.page' : (1, 'number', 0, 0),
|
||||||
|
'startID.id' : (1, 'number', 0, 0),
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# full tag path record keeping routines
|
||||||
|
def tag_push(self, token):
|
||||||
|
self.tagpath.append(token)
|
||||||
|
def tag_pop(self):
|
||||||
|
if len(self.tagpath) > 0 :
|
||||||
|
self.tagpath.pop()
|
||||||
|
def tagpath_len(self):
|
||||||
|
return len(self.tagpath)
|
||||||
|
def get_tagpath(self, i):
|
||||||
|
cnt = len(self.tagpath)
|
||||||
|
if i < cnt : result = self.tagpath[i]
|
||||||
|
for j in xrange(i+1, cnt) :
|
||||||
|
result += '.' + self.tagpath[j]
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# list of absolute command byte values values that indicate
|
||||||
|
# various types of loop meachanisms typically used to generate vectors
|
||||||
|
|
||||||
|
cmd_list = (0x76, 0x76)
|
||||||
|
|
||||||
|
# peek at and return 1 byte that is ahead by i bytes
|
||||||
|
def peek(self, aheadi):
|
||||||
|
c = self.fo.read(aheadi)
|
||||||
|
if (len(c) == 0):
|
||||||
|
return None
|
||||||
|
self.fo.seek(-aheadi,1)
|
||||||
|
c = c[-1:]
|
||||||
|
return ord(c)
|
||||||
|
|
||||||
|
|
||||||
|
# get the next value from the file being processed
|
||||||
|
def getNext(self):
|
||||||
|
nbyte = self.peek(1);
|
||||||
|
if (nbyte == None):
|
||||||
|
return None
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
return val
|
||||||
|
|
||||||
|
|
||||||
|
# format an arg by argtype
|
||||||
|
def formatArg(self, arg, argtype):
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
result = self.dict.lookup(arg)
|
||||||
|
elif (argtype == 'raw') or (argtype == 'number') or (argtype == 'scalar_number') :
|
||||||
|
result = arg
|
||||||
|
elif (argtype == 'snippets') :
|
||||||
|
result = arg
|
||||||
|
else :
|
||||||
|
print "Error Unknown argtype %s" % argtype
|
||||||
|
sys.exit(-2)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# process the next tag token, recursively handling subtags,
|
||||||
|
# arguments, and commands
|
||||||
|
def procToken(self, token):
|
||||||
|
|
||||||
|
known_token = False
|
||||||
|
self.tag_push(token)
|
||||||
|
|
||||||
|
if self.debug : print 'Processing: ', self.get_tagpath(0)
|
||||||
|
cnt = self.tagpath_len()
|
||||||
|
for j in xrange(cnt):
|
||||||
|
tkn = self.get_tagpath(j)
|
||||||
|
if tkn in self.token_tags :
|
||||||
|
num_args = self.token_tags[tkn][0]
|
||||||
|
argtype = self.token_tags[tkn][1]
|
||||||
|
subtags = self.token_tags[tkn][2]
|
||||||
|
splcase = self.token_tags[tkn][3]
|
||||||
|
ntags = -1
|
||||||
|
known_token = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if known_token :
|
||||||
|
|
||||||
|
# handle subtags if present
|
||||||
|
subtagres = []
|
||||||
|
if (splcase == 1):
|
||||||
|
# this type of tag uses of escape marker 0x74 indicate subtag count
|
||||||
|
if self.peek(1) == 0x74:
|
||||||
|
skip = readEncodedNumber(self.fo)
|
||||||
|
subtags = 1
|
||||||
|
num_args = 0
|
||||||
|
|
||||||
|
if (subtags == 1):
|
||||||
|
ntags = readEncodedNumber(self.fo)
|
||||||
|
if self.debug : print 'subtags: ' + token + ' has ' + str(ntags)
|
||||||
|
for j in xrange(ntags):
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
subtagres.append(self.procToken(self.dict.lookup(val)))
|
||||||
|
|
||||||
|
# arguments can be scalars or vectors of text or numbers
|
||||||
|
argres = []
|
||||||
|
if num_args > 0 :
|
||||||
|
firstarg = self.peek(1)
|
||||||
|
if (firstarg in self.cmd_list) and (argtype != 'scalar_number') and (argtype != 'scalar_text'):
|
||||||
|
# single argument is a variable length vector of data
|
||||||
|
arg = readEncodedNumber(self.fo)
|
||||||
|
argres = self.decodeCMD(arg,argtype)
|
||||||
|
else :
|
||||||
|
# num_arg scalar arguments
|
||||||
|
for i in xrange(num_args):
|
||||||
|
argres.append(self.formatArg(readEncodedNumber(self.fo), argtype))
|
||||||
|
|
||||||
|
# build the return tag
|
||||||
|
result = []
|
||||||
|
tkn = self.get_tagpath(0)
|
||||||
|
result.append(tkn)
|
||||||
|
result.append(subtagres)
|
||||||
|
result.append(argtype)
|
||||||
|
result.append(argres)
|
||||||
|
self.tag_pop()
|
||||||
|
return result
|
||||||
|
|
||||||
|
# all tokens that need to be processed should be in the hash
|
||||||
|
# table if it may indicate a problem, either new token
|
||||||
|
# or an out of sync condition
|
||||||
|
else:
|
||||||
|
result = []
|
||||||
|
if (self.debug):
|
||||||
|
print 'Unknown Token:', token
|
||||||
|
self.tag_pop()
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# special loop used to process code snippets
|
||||||
|
# it is NEVER used to format arguments.
|
||||||
|
# builds the snippetList
|
||||||
|
def doLoop72(self, argtype):
|
||||||
|
cnt = readEncodedNumber(self.fo)
|
||||||
|
if self.debug :
|
||||||
|
result = 'Set of '+ str(cnt) + ' xml snippets. The overall structure \n'
|
||||||
|
result += 'of the document is indicated by snippet number sets at the\n'
|
||||||
|
result += 'end of each snippet. \n'
|
||||||
|
print result
|
||||||
|
for i in xrange(cnt):
|
||||||
|
if self.debug: print 'Snippet:',str(i)
|
||||||
|
snippet = []
|
||||||
|
snippet.append(i)
|
||||||
|
val = readEncodedNumber(self.fo)
|
||||||
|
snippet.append(self.procToken(self.dict.lookup(val)))
|
||||||
|
self.snippetList.append(snippet)
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# general loop code gracisouly submitted by "skindle" - thank you!
|
||||||
|
def doLoop76Mode(self, argtype, cnt, mode):
|
||||||
|
result = []
|
||||||
|
adj = 0
|
||||||
|
if mode & 1:
|
||||||
|
adj = readEncodedNumber(self.fo)
|
||||||
|
mode = mode >> 1
|
||||||
|
x = []
|
||||||
|
for i in xrange(cnt):
|
||||||
|
x.append(readEncodedNumber(self.fo) - adj)
|
||||||
|
for i in xrange(mode):
|
||||||
|
for j in xrange(1, cnt):
|
||||||
|
x[j] = x[j] + x[j - 1]
|
||||||
|
for i in xrange(cnt):
|
||||||
|
result.append(self.formatArg(x[i],argtype))
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# dispatches loop commands bytes with various modes
|
||||||
|
# The 0x76 style loops are used to build vectors
|
||||||
|
|
||||||
|
# This was all derived by trial and error and
|
||||||
|
# new loop types may exist that are not handled here
|
||||||
|
# since they did not appear in the test cases
|
||||||
|
|
||||||
|
def decodeCMD(self, cmd, argtype):
|
||||||
|
if (cmd == 0x76):
|
||||||
|
|
||||||
|
# loop with cnt, and mode to control loop styles
|
||||||
|
cnt = readEncodedNumber(self.fo)
|
||||||
|
mode = readEncodedNumber(self.fo)
|
||||||
|
|
||||||
|
if self.debug : print 'Loop for', cnt, 'with mode', mode, ': '
|
||||||
|
return self.doLoop76Mode(argtype, cnt, mode)
|
||||||
|
|
||||||
|
if self.dbug: print "Unknown command", cmd
|
||||||
|
result = []
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# add full tag path to injected snippets
|
||||||
|
def updateName(self, tag, prefix):
|
||||||
|
name = tag[0]
|
||||||
|
subtagList = tag[1]
|
||||||
|
argtype = tag[2]
|
||||||
|
argList = tag[3]
|
||||||
|
nname = prefix + '.' + name
|
||||||
|
nsubtaglist = []
|
||||||
|
for j in subtagList:
|
||||||
|
nsubtaglist.append(self.updateName(j,prefix))
|
||||||
|
ntag = []
|
||||||
|
ntag.append(nname)
|
||||||
|
ntag.append(nsubtaglist)
|
||||||
|
ntag.append(argtype)
|
||||||
|
ntag.append(argList)
|
||||||
|
return ntag
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# perform depth first injection of specified snippets into this one
|
||||||
|
def injectSnippets(self, snippet):
|
||||||
|
snipno, tag = snippet
|
||||||
|
name = tag[0]
|
||||||
|
subtagList = tag[1]
|
||||||
|
argtype = tag[2]
|
||||||
|
argList = tag[3]
|
||||||
|
nsubtagList = []
|
||||||
|
if len(argList) > 0 :
|
||||||
|
for j in argList:
|
||||||
|
asnip = self.snippetList[j]
|
||||||
|
aso, atag = self.injectSnippets(asnip)
|
||||||
|
atag = self.updateName(atag, name)
|
||||||
|
nsubtagList.append(atag)
|
||||||
|
argtype='number'
|
||||||
|
argList=[]
|
||||||
|
if len(nsubtagList) > 0 :
|
||||||
|
subtagList.extend(nsubtagList)
|
||||||
|
tag = []
|
||||||
|
tag.append(name)
|
||||||
|
tag.append(subtagList)
|
||||||
|
tag.append(argtype)
|
||||||
|
tag.append(argList)
|
||||||
|
snippet = []
|
||||||
|
snippet.append(snipno)
|
||||||
|
snippet.append(tag)
|
||||||
|
return snippet
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# format the tag for output
|
||||||
|
def formatTag(self, node):
|
||||||
|
name = node[0]
|
||||||
|
subtagList = node[1]
|
||||||
|
argtype = node[2]
|
||||||
|
argList = node[3]
|
||||||
|
fullpathname = name.split('.')
|
||||||
|
nodename = fullpathname.pop()
|
||||||
|
ilvl = len(fullpathname)
|
||||||
|
indent = ' ' * (3 * ilvl)
|
||||||
|
rlst = []
|
||||||
|
rlst.append(indent + '<' + nodename + '>')
|
||||||
|
if len(argList) > 0:
|
||||||
|
alst = []
|
||||||
|
for j in argList:
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
alst.append(j + '|')
|
||||||
|
else :
|
||||||
|
alst.append(str(j) + ',')
|
||||||
|
argres = "".join(alst)
|
||||||
|
argres = argres[0:-1]
|
||||||
|
if argtype == 'snippets' :
|
||||||
|
rlst.append('snippets:' + argres)
|
||||||
|
else :
|
||||||
|
rlst.append(argres)
|
||||||
|
if len(subtagList) > 0 :
|
||||||
|
rlst.append('\n')
|
||||||
|
for j in subtagList:
|
||||||
|
if len(j) > 0 :
|
||||||
|
rlst.append(self.formatTag(j))
|
||||||
|
rlst.append(indent + '</' + nodename + '>\n')
|
||||||
|
else:
|
||||||
|
rlst.append('</' + nodename + '>\n')
|
||||||
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
|
# flatten tag
|
||||||
|
def flattenTag(self, node):
|
||||||
|
name = node[0]
|
||||||
|
subtagList = node[1]
|
||||||
|
argtype = node[2]
|
||||||
|
argList = node[3]
|
||||||
|
rlst = []
|
||||||
|
rlst.append(name)
|
||||||
|
if (len(argList) > 0):
|
||||||
|
alst = []
|
||||||
|
for j in argList:
|
||||||
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
|
alst.append(j + '|')
|
||||||
|
else :
|
||||||
|
alst.append(str(j) + '|')
|
||||||
|
argres = "".join(alst)
|
||||||
|
argres = argres[0:-1]
|
||||||
|
if argtype == 'snippets' :
|
||||||
|
rlst.append('.snippets=' + argres)
|
||||||
|
else :
|
||||||
|
rlst.append('=' + argres)
|
||||||
|
rlst.append('\n')
|
||||||
|
for j in subtagList:
|
||||||
|
if len(j) > 0 :
|
||||||
|
rlst.append(self.flattenTag(j))
|
||||||
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
|
# reduce create xml output
|
||||||
|
def formatDoc(self, flat_xml):
|
||||||
|
rlst = []
|
||||||
|
for j in self.doc :
|
||||||
|
if len(j) > 0:
|
||||||
|
if flat_xml:
|
||||||
|
rlst.append(self.flattenTag(j))
|
||||||
|
else:
|
||||||
|
rlst.append(self.formatTag(j))
|
||||||
|
result = "".join(rlst)
|
||||||
|
if self.debug : print result
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# main loop - parse the page.dat files
|
||||||
|
# to create structured document and snippets
|
||||||
|
|
||||||
|
# FIXME: value at end of magic appears to be a subtags count
|
||||||
|
# but for what? For now, inject an 'info" tag as it is in
|
||||||
|
# every dictionary and seems close to what is meant
|
||||||
|
# The alternative is to special case the last _ "0x5f" to mean something
|
||||||
|
|
||||||
|
def process(self):
|
||||||
|
|
||||||
|
# peek at the first bytes to see what type of file it is
|
||||||
|
magic = self.fo.read(9)
|
||||||
|
if (magic[0:1] == 'p') and (magic[2:9] == 'marker_'):
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'p') and (magic[2:9] == '__PAGE_'):
|
||||||
|
skip = self.fo.read(2)
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'p') and (magic[2:8] == '_PAGE_'):
|
||||||
|
first_token = 'info'
|
||||||
|
elif (magic[0:1] == 'g') and (magic[2:9] == '__GLYPH'):
|
||||||
|
skip = self.fo.read(3)
|
||||||
|
first_token = 'info'
|
||||||
|
else :
|
||||||
|
# other0.dat file
|
||||||
|
first_token = None
|
||||||
|
self.fo.seek(-9,1)
|
||||||
|
|
||||||
|
|
||||||
|
# main loop to read and build the document tree
|
||||||
|
while True:
|
||||||
|
|
||||||
|
if first_token != None :
|
||||||
|
# use "inserted" first token 'info' for page and glyph files
|
||||||
|
tag = self.procToken(first_token)
|
||||||
|
if len(tag) > 0 :
|
||||||
|
self.doc.append(tag)
|
||||||
|
first_token = None
|
||||||
|
|
||||||
|
v = self.getNext()
|
||||||
|
if (v == None):
|
||||||
|
break
|
||||||
|
|
||||||
|
if (v == 0x72):
|
||||||
|
self.doLoop72('number')
|
||||||
|
elif (v > 0) and (v < self.dict.getSize()) :
|
||||||
|
tag = self.procToken(self.dict.lookup(v))
|
||||||
|
if len(tag) > 0 :
|
||||||
|
self.doc.append(tag)
|
||||||
|
else:
|
||||||
|
if self.debug:
|
||||||
|
print "Main Loop: Unknown value: %x" % v
|
||||||
|
if (v == 0):
|
||||||
|
if (self.peek(1) == 0x5f):
|
||||||
|
skip = self.fo.read(1)
|
||||||
|
first_token = 'info'
|
||||||
|
|
||||||
|
# now do snippet injection
|
||||||
|
if len(self.snippetList) > 0 :
|
||||||
|
if self.debug : print 'Injecting Snippets:'
|
||||||
|
snippet = self.injectSnippets(self.snippetList[0])
|
||||||
|
snipno = snippet[0]
|
||||||
|
tag_add = snippet[1]
|
||||||
|
if self.debug : print self.formatTag(tag_add)
|
||||||
|
if len(tag_add) > 0:
|
||||||
|
self.doc.append(tag_add)
|
||||||
|
|
||||||
|
# handle generation of xml output
|
||||||
|
xmlpage = self.formatDoc(self.flat_xml)
|
||||||
|
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
|
||||||
|
def fromData(dict, fname):
|
||||||
|
flat_xml = True
|
||||||
|
debug = False
|
||||||
|
pp = PageParser(fname, dict, debug, flat_xml)
|
||||||
|
xmlpage = pp.process()
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
def getXML(dict, fname):
|
||||||
|
flat_xml = False
|
||||||
|
debug = False
|
||||||
|
pp = PageParser(fname, dict, debug, flat_xml)
|
||||||
|
xmlpage = pp.process()
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print 'Usage: '
|
||||||
|
print ' convert2xml.py dict0000.dat infile.dat '
|
||||||
|
print ' '
|
||||||
|
print ' Options:'
|
||||||
|
print ' -h print this usage help message '
|
||||||
|
print ' -d turn on debug output to check for potential errors '
|
||||||
|
print ' --flat-xml output the flattened xml page description only '
|
||||||
|
print ' '
|
||||||
|
print ' This program will attempt to convert a page*.dat file or '
|
||||||
|
print ' glyphs*.dat file, using the dict0000.dat file, to its xml description. '
|
||||||
|
print ' '
|
||||||
|
print ' Use "cmbtc_dump.py" first to unencrypt, uncompress, and dump '
|
||||||
|
print ' the *.dat files from a Topaz format e-book.'
|
||||||
|
|
||||||
#
|
#
|
||||||
# Locate and open the Kindle.info file.
|
# Main
|
||||||
#
|
#
|
||||||
def openKindleInfo(kInfoFile=None):
|
|
||||||
if kInfoFile == None:
|
def main(argv):
|
||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
dictFile = ""
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
pageFile = ""
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
debug = False
|
||||||
if not os.path.isfile(kinfopath):
|
flat_xml = False
|
||||||
raise DrmException('Error: kindle.info file can not be found')
|
printOutput = False
|
||||||
return open(kinfopath,'r')
|
if len(argv) == 0:
|
||||||
else:
|
printOutput = True
|
||||||
if not os.path.isfile(kInfoFile):
|
argv = sys.argv
|
||||||
raise DrmException('Error: kindle.info file can not be found')
|
|
||||||
return open(kInfoFile, 'r')
|
try:
|
||||||
|
opts, args = getopt.getopt(argv[1:], "hd", ["flat-xml"])
|
||||||
|
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
|
||||||
|
# print help information and exit:
|
||||||
|
print str(err) # will print something like "option -a not recognized"
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
if len(opts) == 0 and len(args) == 0 :
|
||||||
|
usage()
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o =="-d":
|
||||||
|
debug=True
|
||||||
|
if o =="-h":
|
||||||
|
usage()
|
||||||
|
sys.exit(0)
|
||||||
|
if o =="--flat-xml":
|
||||||
|
flat_xml = True
|
||||||
|
|
||||||
|
dictFile, pageFile = args[0], args[1]
|
||||||
|
|
||||||
|
# read in the string table dictionary
|
||||||
|
dict = Dictionary(dictFile)
|
||||||
|
# dict.dumpDict()
|
||||||
|
|
||||||
|
# create a page parser
|
||||||
|
pp = PageParser(pageFile, dict, debug, flat_xml)
|
||||||
|
|
||||||
|
xmlpage = pp.process()
|
||||||
|
|
||||||
|
if printOutput:
|
||||||
|
print xmlpage
|
||||||
|
return 0
|
||||||
|
|
||||||
|
return xmlpage
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main(''))
|
||||||
|
|||||||
@@ -1,388 +1,249 @@
|
|||||||
#!/usr/bin/python
|
#! /usr/bin/python
|
||||||
#
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
# This is a python script. You need a Python interpreter to run it.
|
|
||||||
# For example, ActiveState Python, which exists for windows.
|
|
||||||
#
|
|
||||||
# Changelog
|
|
||||||
# 0.01 - Initial version
|
|
||||||
# 0.02 - Huffdic compressed books were not properly decrypted
|
|
||||||
# 0.03 - Wasn't checking MOBI header length
|
|
||||||
# 0.04 - Wasn't sanity checking size of data record
|
|
||||||
# 0.05 - It seems that the extra data flags take two bytes not four
|
|
||||||
# 0.06 - And that low bit does mean something after all :-)
|
|
||||||
# 0.07 - The extra data flags aren't present in MOBI header < 0xE8 in size
|
|
||||||
# 0.08 - ...and also not in Mobi header version < 6
|
|
||||||
# 0.09 - ...but they are there with Mobi header version 6, header size 0xE4!
|
|
||||||
# 0.10 - Outputs unencrypted files as-is, so that when run as a Calibre
|
|
||||||
# import filter it works when importing unencrypted files.
|
|
||||||
# Also now handles encrypted files that don't need a specific PID.
|
|
||||||
# 0.11 - use autoflushed stdout and proper return values
|
|
||||||
# 0.12 - Fix for problems with metadata import as Calibre plugin, report errors
|
|
||||||
# 0.13 - Formatting fixes: retabbed file, removed trailing whitespace
|
|
||||||
# and extra blank lines, converted CR/LF pairs at ends of each line,
|
|
||||||
# and other cosmetic fixes.
|
|
||||||
# 0.14 - Working out when the extra data flags are present has been problematic
|
|
||||||
# Versions 7 through 9 have tried to tweak the conditions, but have been
|
|
||||||
# only partially successful. Closer examination of lots of sample
|
|
||||||
# files reveals that a confusion has arisen because trailing data entries
|
|
||||||
# are not encrypted, but it turns out that the multibyte entries
|
|
||||||
# in utf8 file are encrypted. (Although neither kind gets compressed.)
|
|
||||||
# This knowledge leads to a simplification of the test for the
|
|
||||||
# trailing data byte flags - version 5 and higher AND header size >= 0xE4.
|
|
||||||
# 0.15 - Now outputs 'heartbeat', and is also quicker for long files.
|
|
||||||
# 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
|
|
||||||
# 0.17 - added modifications to support its use as an imported python module
|
|
||||||
# both inside calibre and also in other places (ie K4DeDRM tools)
|
|
||||||
# 0.17a- disabled the standalone plugin feature since a plugin can not import
|
|
||||||
# a plugin
|
|
||||||
# 0.18 - It seems that multibyte entries aren't encrypted in a v7 file...
|
|
||||||
# Removed the disabled Calibre plug-in code
|
|
||||||
# Permit use of 8-digit PIDs
|
|
||||||
# 0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
|
|
||||||
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
|
|
||||||
# 0.21 - Added support for multiple pids
|
|
||||||
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
|
||||||
# 0.23 - fixed problem with older files with no EXTH section
|
|
||||||
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
|
||||||
|
|
||||||
__version__ = '0.24'
|
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
import csv
|
||||||
class Unbuffered:
|
|
||||||
def __init__(self, stream):
|
|
||||||
self.stream = stream
|
|
||||||
def write(self, data):
|
|
||||||
self.stream.write(data)
|
|
||||||
self.stream.flush()
|
|
||||||
def __getattr__(self, attr):
|
|
||||||
return getattr(self.stream, attr)
|
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import struct
|
import getopt
|
||||||
import binascii
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
#
|
class PParser(object):
|
||||||
# MobiBook Utility Routines
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
#
|
self.gd = gd
|
||||||
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
|
self.temp = []
|
||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
self.ph = -1
|
||||||
def PC1(key, src, decryption=True):
|
self.pw = -1
|
||||||
sum1 = 0;
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
sum2 = 0;
|
for p in startpos:
|
||||||
keyXorVal = 0;
|
(name, argres) = self.lineinDoc(p)
|
||||||
if len(key)!=16:
|
self.ph = max(self.ph, int(argres))
|
||||||
print "Bad key length!"
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
return None
|
for p in startpos:
|
||||||
wkey = []
|
(name, argres) = self.lineinDoc(p)
|
||||||
for i in xrange(8):
|
self.pw = max(self.pw, int(argres))
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
|
||||||
dst = ""
|
|
||||||
for i in xrange(len(src)):
|
|
||||||
temp1 = 0;
|
|
||||||
byteXorVal = 0;
|
|
||||||
for j in xrange(8):
|
|
||||||
temp1 ^= wkey[j]
|
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
|
||||||
sum1 = (temp1*346)&0xFFFF
|
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
|
||||||
byteXorVal ^= temp1 ^ sum2
|
|
||||||
curByte = ord(src[i])
|
|
||||||
if not decryption:
|
|
||||||
keyXorVal = curByte * 257;
|
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
|
||||||
if decryption:
|
|
||||||
keyXorVal = curByte * 257;
|
|
||||||
for j in xrange(8):
|
|
||||||
wkey[j] ^= keyXorVal;
|
|
||||||
dst+=chr(curByte)
|
|
||||||
return dst
|
|
||||||
|
|
||||||
def checksumPid(s):
|
if self.ph <= 0:
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
crc = (~binascii.crc32(s,-1))&0xFFFFFFFF
|
if self.pw <= 0:
|
||||||
crc = crc ^ (crc >> 16)
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
res = s
|
|
||||||
l = len(letters)
|
|
||||||
for i in (0,1):
|
|
||||||
b = crc & 0xff
|
|
||||||
pos = (b // l) ^ (b % l)
|
|
||||||
res += letters[pos%l]
|
|
||||||
crc >>= 8
|
|
||||||
return res
|
|
||||||
|
|
||||||
def getSizeOfTrailingDataEntries(ptr, size, flags):
|
res = []
|
||||||
def getSizeOfTrailingDataEntry(ptr, size):
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
bitpos, result = 0, 0
|
for p in startpos:
|
||||||
if size <= 0:
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
return result
|
res.extend(argres)
|
||||||
while True:
|
self.gx = res
|
||||||
v = ord(ptr[size-1])
|
|
||||||
result |= (v & 0x7F) << bitpos
|
res = []
|
||||||
bitpos += 7
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
size -= 1
|
for p in startpos:
|
||||||
if (v & 0x80) != 0 or (bitpos >= 28) or (size == 0):
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
return result
|
res.extend(argres)
|
||||||
num = 0
|
self.gy = res
|
||||||
testflags = flags >> 1
|
|
||||||
while testflags:
|
res = []
|
||||||
if testflags & 1:
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
num += getSizeOfTrailingDataEntry(ptr, size - num)
|
for p in startpos:
|
||||||
testflags >>= 1
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
# Check the low bit to see if there's multibyte data present.
|
res.extend(argres)
|
||||||
# if multibyte data is included in the encryped data, we'll
|
self.gid = res
|
||||||
# have already cleared this flag.
|
|
||||||
if flags & 1:
|
|
||||||
num += (ord(ptr[size - num - 1]) & 0x3) + 1
|
|
||||||
return num
|
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
class MobiBook:
|
# find tag in doc if within pos to end inclusive
|
||||||
def loadSection(self, section):
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
if (section + 1 == self.num_sections):
|
result = None
|
||||||
endoff = len(self.data_file)
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
else:
|
else:
|
||||||
endoff = self.sections[section + 1][0]
|
end = min(self.docSize, end)
|
||||||
off = self.sections[section][0]
|
foundat = -1
|
||||||
return self.data_file[off:endoff]
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
def __init__(self, infile):
|
if item.find('=') >= 0:
|
||||||
# initial sanity check on file
|
(name, argres) = item.split('=',1)
|
||||||
self.data_file = file(infile, 'rb').read()
|
else :
|
||||||
self.header = self.data_file[0:78]
|
name = item
|
||||||
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
argres = ''
|
||||||
raise DrmException("invalid file format")
|
if name.endswith(tagpath) :
|
||||||
self.magic = self.header[0x3C:0x3C+8]
|
result = argres
|
||||||
self.crypto_type = -1
|
foundat = j
|
||||||
|
|
||||||
# build up section offset and flag info
|
|
||||||
self.num_sections, = struct.unpack('>H', self.header[76:78])
|
|
||||||
self.sections = []
|
|
||||||
for i in xrange(self.num_sections):
|
|
||||||
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data_file[78+i*8:78+i*8+8])
|
|
||||||
flags, val = a1, a2<<16|a3<<8|a4
|
|
||||||
self.sections.append( (offset, flags, val) )
|
|
||||||
|
|
||||||
# parse information from section 0
|
|
||||||
self.sect = self.loadSection(0)
|
|
||||||
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
|
||||||
|
|
||||||
if self.magic == 'TEXtREAd':
|
|
||||||
print "Book has format: ", self.magic
|
|
||||||
self.extra_data_flags = 0
|
|
||||||
self.mobi_length = 0
|
|
||||||
self.mobi_version = -1
|
|
||||||
self.meta_array = {}
|
|
||||||
return
|
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
|
||||||
self.extra_data_flags = 0
|
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
|
||||||
if self.mobi_version < 7:
|
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
|
||||||
self.extra_data_flags &= 0xFFFE
|
|
||||||
|
|
||||||
# if exth region exists parse it for metadata array
|
|
||||||
self.meta_array = {}
|
|
||||||
try:
|
|
||||||
exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
|
|
||||||
exth = 'NONE'
|
|
||||||
if exth_flag & 0x40:
|
|
||||||
exth = self.sect[16 + self.mobi_length:]
|
|
||||||
if (len(exth) >= 4) and (exth[:4] == 'EXTH'):
|
|
||||||
nitems, = struct.unpack('>I', exth[8:12])
|
|
||||||
pos = 12
|
|
||||||
for i in xrange(nitems):
|
|
||||||
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
|
||||||
content = exth[pos + 8: pos + size]
|
|
||||||
self.meta_array[type] = content
|
|
||||||
pos += size
|
|
||||||
except:
|
|
||||||
self.meta_array = {}
|
|
||||||
pass
|
|
||||||
|
|
||||||
def getBookTitle(self):
|
|
||||||
title = ''
|
|
||||||
if 503 in self.meta_array:
|
|
||||||
title = self.meta_array[503]
|
|
||||||
else :
|
|
||||||
toff, tlen = struct.unpack('>II', self.sect[0x54:0x5c])
|
|
||||||
tend = toff + tlen
|
|
||||||
title = self.sect[toff:tend]
|
|
||||||
if title == '':
|
|
||||||
title = self.header[:32]
|
|
||||||
title = title.split("\0")[0]
|
|
||||||
return title
|
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
|
||||||
rec209 = None
|
|
||||||
token = None
|
|
||||||
if 209 in self.meta_array:
|
|
||||||
rec209 = self.meta_array[209]
|
|
||||||
data = rec209
|
|
||||||
# Parse the 209 data to find the the exth record with the token data.
|
|
||||||
# The last character of the 209 data points to the record with the token.
|
|
||||||
# Always 208 from my experience, but I'll leave the logic in case that changes.
|
|
||||||
for i in xrange(len(data)):
|
|
||||||
if ord(data[i]) != 0:
|
|
||||||
if self.meta_array[ord(data[i])] != None:
|
|
||||||
token = self.meta_array[ord(data[i])]
|
|
||||||
return rec209, token
|
|
||||||
|
|
||||||
def patch(self, off, new):
|
|
||||||
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
|
|
||||||
|
|
||||||
def patchSection(self, section, new, in_off = 0):
|
|
||||||
if (section + 1 == self.num_sections):
|
|
||||||
endoff = len(self.data_file)
|
|
||||||
else:
|
|
||||||
endoff = self.sections[section + 1][0]
|
|
||||||
off = self.sections[section][0]
|
|
||||||
assert off + in_off + len(new) <= endoff
|
|
||||||
self.patch(off + in_off, new)
|
|
||||||
|
|
||||||
def parseDRM(self, data, count, pidlist):
|
|
||||||
found_key = None
|
|
||||||
keyvec1 = "\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96"
|
|
||||||
for pid in pidlist:
|
|
||||||
bigpid = pid.ljust(16,'\0')
|
|
||||||
temp_key = PC1(keyvec1, bigpid, False)
|
|
||||||
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
|
||||||
found_key = None
|
|
||||||
for i in xrange(count):
|
|
||||||
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
|
||||||
if cksum == temp_key_sum:
|
|
||||||
cookie = PC1(temp_key, cookie)
|
|
||||||
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
|
||||||
if verification == ver and (flags & 0x1F) == 1:
|
|
||||||
found_key = finalkey
|
|
||||||
break
|
|
||||||
if found_key != None:
|
|
||||||
break
|
break
|
||||||
if not found_key:
|
return foundat, result
|
||||||
# Then try the default encoding that doesn't require a PID
|
|
||||||
pid = "00000000"
|
|
||||||
temp_key = keyvec1
|
|
||||||
temp_key_sum = sum(map(ord,temp_key)) & 0xff
|
|
||||||
for i in xrange(count):
|
|
||||||
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
|
|
||||||
if cksum == temp_key_sum:
|
|
||||||
cookie = PC1(temp_key, cookie)
|
|
||||||
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
|
|
||||||
if verification == ver:
|
|
||||||
found_key = finalkey
|
|
||||||
break
|
|
||||||
return [found_key,pid]
|
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
# return list of start positions for the tagpath
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
def posinDoc(self, tagpath):
|
||||||
print 'Crypto Type is: ', crypto_type
|
startpos = []
|
||||||
self.crypto_type = crypto_type
|
pos = 0
|
||||||
if crypto_type == 0:
|
res = ""
|
||||||
print "This book is not encrypted."
|
while res != None :
|
||||||
return self.data_file
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if res != None :
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
goodpids = []
|
def getData(self, path):
|
||||||
for pid in pidlist:
|
result = None
|
||||||
if len(pid)==10:
|
cnt = len(self.flatdoc)
|
||||||
if checksumPid(pid[0:-2]) != pid:
|
for j in xrange(cnt):
|
||||||
print "Warning: PID " + pid + " has incorrect checksum, should have been "+checksumPid(pid[0:-2])
|
item = self.flatdoc[j]
|
||||||
goodpids.append(pid[0:-2])
|
if item.find('=') >= 0:
|
||||||
elif len(pid)==8:
|
(name, argt) = item.split('=')
|
||||||
goodpids.append(pid)
|
argres = argt.split('|')
|
||||||
|
|
||||||
if self.crypto_type == 1:
|
|
||||||
t1_keyvec = "QDCVEPMU675RUBSZ"
|
|
||||||
if self.magic == 'TEXtREAd':
|
|
||||||
bookkey_data = self.sect[0x0E:0x0E+16]
|
|
||||||
else:
|
else:
|
||||||
bookkey_data = self.sect[0x90:0x90+16]
|
name = item
|
||||||
pid = "00000000"
|
argres = []
|
||||||
found_key = PC1(t1_keyvec, bookkey_data)
|
if (name.endswith(path)):
|
||||||
else :
|
result = argres
|
||||||
# calculate the keys
|
break
|
||||||
drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', self.sect[0xA8:0xA8+16])
|
if (len(argres) > 0) :
|
||||||
if drm_count == 0:
|
for j in xrange(0,len(argres)):
|
||||||
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
|
argres[j] = int(argres[j])
|
||||||
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
|
return result
|
||||||
if not found_key:
|
|
||||||
raise DrmException("No key found. Most likely the correct PID has not been given.")
|
|
||||||
# kill the drm keys
|
|
||||||
self.patchSection(0, "\0" * drm_size, drm_ptr)
|
|
||||||
# kill the drm pointers
|
|
||||||
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
|
|
||||||
|
|
||||||
if pid=="00000000":
|
def getDataatPos(self, path, pos):
|
||||||
print "File has default encryption, no specific PID."
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
else:
|
else:
|
||||||
print "File is encoded with PID "+checksumPid(pid)+"."
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
# clear the crypto type
|
def getDataTemp(self, path):
|
||||||
self.patchSection(0, "\0" * 2, 0xC)
|
result = None
|
||||||
|
cnt = len(self.temp)
|
||||||
|
for j in xrange(cnt):
|
||||||
|
item = self.temp[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
self.temp.pop(j)
|
||||||
|
break
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
return result
|
||||||
|
|
||||||
# decrypt sections
|
def getImages(self):
|
||||||
print "Decrypting. Please wait . . .",
|
result = []
|
||||||
new_data = self.data_file[:self.sections[1][0]]
|
self.temp = self.flatdoc
|
||||||
for i in xrange(1, self.records+1):
|
while (self.getDataTemp('img') != None):
|
||||||
data = self.loadSection(i)
|
h = self.getDataTemp('img.h')[0]
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
w = self.getDataTemp('img.w')[0]
|
||||||
if i%100 == 0:
|
x = self.getDataTemp('img.x')[0]
|
||||||
print ".",
|
y = self.getDataTemp('img.y')[0]
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
src = self.getDataTemp('img.src')[0]
|
||||||
new_data += PC1(found_key, data[0:len(data) - extra_size])
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
if extra_size > 0:
|
return result
|
||||||
new_data += data[-extra_size:]
|
|
||||||
if self.num_sections > self.records+1:
|
|
||||||
new_data += self.data_file[self.sections[self.records+1][0]:]
|
|
||||||
self.data_file = new_data
|
|
||||||
print "done"
|
|
||||||
return self.data_file
|
|
||||||
|
|
||||||
def getUnencryptedBook(infile,pid):
|
def getGlyphs(self):
|
||||||
if not os.path.isfile(infile):
|
result = []
|
||||||
raise DrmException('Input File Not Found')
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
book = MobiBook(infile)
|
glyphs = []
|
||||||
return book.processBook([pid])
|
for j in set(self.gid):
|
||||||
|
glyphs.append(j)
|
||||||
|
glyphs.sort()
|
||||||
|
for gid in glyphs:
|
||||||
|
id='id="gl%d"' % gid
|
||||||
|
path = self.gd.lookup(id)
|
||||||
|
if path:
|
||||||
|
result.append(id + ' ' + path)
|
||||||
|
return result
|
||||||
|
|
||||||
def getUnencryptedBookWithList(infile,pidlist):
|
|
||||||
if not os.path.isfile(infile):
|
|
||||||
raise DrmException('Input File Not Found')
|
|
||||||
book = MobiBook(infile)
|
|
||||||
return book.processBook(pidlist)
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
mlst = []
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
if len(argv)<4:
|
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
||||||
print "Removes protection from Mobipocket books"
|
if (raw):
|
||||||
print "Usage:"
|
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
print " %s <infile> <outfile> <Comma separated list of PIDs to try>" % sys.argv[0]
|
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
||||||
return 1
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
else:
|
else:
|
||||||
infile = argv[1]
|
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
outfile = argv[2]
|
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
||||||
pidlist = argv[3].split(',')
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
try:
|
mlst.append('<script><![CDATA[\n')
|
||||||
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
||||||
file(outfile, 'wb').write(stripped_file)
|
mlst.append('var dpi=%d;\n' % scaledpi)
|
||||||
except DrmException, e:
|
if (previd) :
|
||||||
print "Error: %s" % e
|
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
||||||
return 1
|
if (nextid) :
|
||||||
return 0
|
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
||||||
|
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
||||||
|
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
||||||
|
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
||||||
|
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
||||||
|
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
|
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
|
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
||||||
|
mlst.append('window.onload=setsize;\n')
|
||||||
|
mlst.append(']]></script>\n')
|
||||||
|
mlst.append('</head>\n')
|
||||||
|
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
||||||
|
mlst.append('<div style="white-space:nowrap;">\n')
|
||||||
|
if previd == None:
|
||||||
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
|
else:
|
||||||
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
|
||||||
|
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
|
||||||
if __name__ == "__main__":
|
if (pp.gid != None):
|
||||||
sys.exit(main())
|
mlst.append('<defs>\n')
|
||||||
|
gdefs = pp.getGlyphs()
|
||||||
|
for j in xrange(0,len(gdefs)):
|
||||||
|
mlst.append(gdefs[j])
|
||||||
|
mlst.append('</defs>\n')
|
||||||
|
img = pp.getImages()
|
||||||
|
if (img != None):
|
||||||
|
for j in xrange(0,len(img)):
|
||||||
|
mlst.append(img[j])
|
||||||
|
if (pp.gid != None):
|
||||||
|
for j in xrange(0,len(pp.gid)):
|
||||||
|
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
|
||||||
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
|
||||||
|
if (raw) :
|
||||||
|
mlst.append('</svg>')
|
||||||
|
else :
|
||||||
|
mlst.append('</svg></a>\n')
|
||||||
|
if nextid == None:
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
|
else :
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
|
||||||
|
mlst.append('</div>\n')
|
||||||
|
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
|
||||||
|
mlst.append('</body>\n')
|
||||||
|
mlst.append('</html>\n')
|
||||||
|
return "".join(mlst)
|
||||||
|
|||||||
Binary file not shown.
@@ -24,17 +24,17 @@
|
|||||||
<key>CFBundleExecutable</key>
|
<key>CFBundleExecutable</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleGetInfoString</key>
|
<key>CFBundleGetInfoString</key>
|
||||||
<string>DeDRM 1.6, Copyright © 2010–2011 by Apprentice Alf.</string>
|
<string>DeDRM 5.0, Written 2010–2012 by Apprentice Alf and others.</string>
|
||||||
<key>CFBundleIconFile</key>
|
<key>CFBundleIconFile</key>
|
||||||
<string>droplet</string>
|
<string>droplet</string>
|
||||||
<key>CFBundleInfoDictionaryVersion</key>
|
<key>CFBundleInfoDictionaryVersion</key>
|
||||||
<string>6.0</string>
|
<string>6.0</string>
|
||||||
<key>CFBundleName</key>
|
<key>CFBundleName</key>
|
||||||
<string>DeDRM</string>
|
<string>DeDRM 5.0</string>
|
||||||
<key>CFBundlePackageType</key>
|
<key>CFBundlePackageType</key>
|
||||||
<string>APPL</string>
|
<string>APPL</string>
|
||||||
<key>CFBundleShortVersionString</key>
|
<key>CFBundleShortVersionString</key>
|
||||||
<string>1.6</string>
|
<string>5.0</string>
|
||||||
<key>CFBundleSignature</key>
|
<key>CFBundleSignature</key>
|
||||||
<string>dplt</string>
|
<string>dplt</string>
|
||||||
<key>LSMinimumSystemVersion</key>
|
<key>LSMinimumSystemVersion</key>
|
||||||
@@ -43,14 +43,18 @@
|
|||||||
<true/>
|
<true/>
|
||||||
<key>WindowState</key>
|
<key>WindowState</key>
|
||||||
<dict>
|
<dict>
|
||||||
|
<key>dividerCollapsed</key>
|
||||||
|
<false/>
|
||||||
|
<key>eventLogLevel</key>
|
||||||
|
<integer>-1</integer>
|
||||||
<key>name</key>
|
<key>name</key>
|
||||||
<string>ScriptWindowState</string>
|
<string>ScriptWindowState</string>
|
||||||
<key>positionOfDivider</key>
|
<key>positionOfDivider</key>
|
||||||
<real>686</real>
|
<real>460</real>
|
||||||
<key>savedFrame</key>
|
<key>savedFrame</key>
|
||||||
<string>2161 -75 907 765 1440 -150 1680 1050 </string>
|
<string>-2 132 1316 746 0 0 1440 878 </string>
|
||||||
<key>selectedTabView</key>
|
<key>selectedTabView</key>
|
||||||
<string>result</string>
|
<string>event log</string>
|
||||||
</dict>
|
</dict>
|
||||||
</dict>
|
</dict>
|
||||||
</plist>
|
</plist>
|
||||||
|
|||||||
Binary file not shown.
@@ -0,0 +1,568 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for doing AES CBC in one file
|
||||||
|
|
||||||
|
Modified by some_updates to extract
|
||||||
|
and combine only those parts needed for AES CBC
|
||||||
|
into one simple to add python file
|
||||||
|
|
||||||
|
Original Version
|
||||||
|
Copyright (c) 2002 by Paul A. Lambert
|
||||||
|
Under:
|
||||||
|
CryptoPy Artisitic License Version 1.0
|
||||||
|
See the wonderful pure python package cryptopy-1.2.5
|
||||||
|
and read its LICENSE.txt for complete license details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CryptoError(Exception):
|
||||||
|
""" Base class for crypto exceptions """
|
||||||
|
def __init__(self,errorMessage='Error!'):
|
||||||
|
self.message = errorMessage
|
||||||
|
def __str__(self):
|
||||||
|
return self.message
|
||||||
|
|
||||||
|
class InitCryptoError(CryptoError):
|
||||||
|
""" Crypto errors during algorithm initialization """
|
||||||
|
class BadKeySizeError(InitCryptoError):
|
||||||
|
""" Bad key size error """
|
||||||
|
class EncryptError(CryptoError):
|
||||||
|
""" Error in encryption processing """
|
||||||
|
class DecryptError(CryptoError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
class DecryptNotBlockAlignedError(DecryptError):
|
||||||
|
""" Error in decryption processing """
|
||||||
|
|
||||||
|
def xorS(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
assert len(a)==len(b)
|
||||||
|
x = []
|
||||||
|
for i in range(len(a)):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
def xor(a,b):
|
||||||
|
""" XOR two strings """
|
||||||
|
x = []
|
||||||
|
for i in range(min(len(a),len(b))):
|
||||||
|
x.append( chr(ord(a[i])^ord(b[i])))
|
||||||
|
return ''.join(x)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Base 'BlockCipher' and Pad classes for cipher instances.
|
||||||
|
BlockCipher supports automatic padding and type conversion. The BlockCipher
|
||||||
|
class was written to make the actual algorithm code more readable and
|
||||||
|
not for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class BlockCipher:
|
||||||
|
""" Block ciphers """
|
||||||
|
def __init__(self):
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.resetEncrypt()
|
||||||
|
self.resetDecrypt()
|
||||||
|
def resetEncrypt(self):
|
||||||
|
self.encryptBlockCount = 0
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
def resetDecrypt(self):
|
||||||
|
self.decryptBlockCount = 0
|
||||||
|
self.bytesToDecrypt = ''
|
||||||
|
|
||||||
|
def encrypt(self, plainText, more = None):
|
||||||
|
""" Encrypt a string and return a binary string """
|
||||||
|
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
|
||||||
|
cipherText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # no more data expected from caller
|
||||||
|
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
|
||||||
|
if len(finalBytes) > 0:
|
||||||
|
ctBlock = self.encryptBlock(finalBytes)
|
||||||
|
self.encryptBlockCount += 1
|
||||||
|
cipherText += ctBlock
|
||||||
|
self.resetEncrypt()
|
||||||
|
return cipherText
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, more = None):
|
||||||
|
""" Decrypt a string and return a string """
|
||||||
|
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
|
||||||
|
|
||||||
|
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
|
||||||
|
if more == None: # no more calls to decrypt, should have all the data
|
||||||
|
if numExtraBytes != 0:
|
||||||
|
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
|
||||||
|
|
||||||
|
# hold back some bytes in case last decrypt has zero len
|
||||||
|
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
|
||||||
|
numBlocks -= 1
|
||||||
|
numExtraBytes = self.blockSize
|
||||||
|
|
||||||
|
plainText = ''
|
||||||
|
for i in range(numBlocks):
|
||||||
|
bStart = i*self.blockSize
|
||||||
|
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
|
||||||
|
self.decryptBlockCount += 1
|
||||||
|
plainText += ptBlock
|
||||||
|
|
||||||
|
if numExtraBytes > 0: # save any bytes that are not block aligned
|
||||||
|
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
|
||||||
|
else:
|
||||||
|
self.bytesToEncrypt = ''
|
||||||
|
|
||||||
|
if more == None: # last decrypt remove padding
|
||||||
|
plainText = self.padding.removePad(plainText, self.blockSize)
|
||||||
|
self.resetDecrypt()
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
|
||||||
|
class Pad:
|
||||||
|
def __init__(self):
|
||||||
|
pass # eventually could put in calculation of min and max size extension
|
||||||
|
|
||||||
|
class padWithPadLen(Pad):
|
||||||
|
""" Pad a binary string with the length of the padding """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add padding to a binary string to make it an even multiple
|
||||||
|
of the block size """
|
||||||
|
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
|
||||||
|
padLength = blockSize - numExtraBytes
|
||||||
|
return extraBytes + padLength*chr(padLength)
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove padding from a binary string """
|
||||||
|
if not(0<len(paddedBinaryString)):
|
||||||
|
raise DecryptNotBlockAlignedError, 'Expected More Data'
|
||||||
|
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
|
||||||
|
|
||||||
|
class noPadding(Pad):
|
||||||
|
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
|
||||||
|
|
||||||
|
def addPad(self, extraBytes, blockSize):
|
||||||
|
""" Add no padding """
|
||||||
|
return extraBytes
|
||||||
|
|
||||||
|
def removePad(self, paddedBinaryString, blockSize):
|
||||||
|
""" Remove no padding """
|
||||||
|
return paddedBinaryString
|
||||||
|
|
||||||
|
"""
|
||||||
|
Rijndael encryption algorithm
|
||||||
|
This byte oriented implementation is intended to closely
|
||||||
|
match FIPS specification for readability. It is not implemented
|
||||||
|
for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Rijndael(BlockCipher):
|
||||||
|
""" Rijndael encryption algorithm """
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
|
||||||
|
self.name = 'RIJNDAEL'
|
||||||
|
self.keySize = keySize
|
||||||
|
self.strength = keySize*8
|
||||||
|
self.blockSize = blockSize # blockSize is in bytes
|
||||||
|
self.padding = padding # change default to noPadding() to get normal ECB behavior
|
||||||
|
|
||||||
|
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes'
|
||||||
|
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes'
|
||||||
|
|
||||||
|
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
|
||||||
|
self.Nk = keySize/4 # Nk is the key length in 32-bit words
|
||||||
|
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
|
||||||
|
# the block (Nb) and key (Nk) sizes.
|
||||||
|
if key != None:
|
||||||
|
self.setKey(key)
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
""" Set a key and generate the expanded key """
|
||||||
|
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
|
||||||
|
self.__expandedKey = keyExpansion(self, key)
|
||||||
|
self.reset() # BlockCipher.reset()
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
|
||||||
|
self.state = self._toBlock(plainTextBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
MixColumns(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
SubBytes(self)
|
||||||
|
ShiftRows(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" decrypt a block (array of bytes) """
|
||||||
|
self.state = self._toBlock(encryptedBlock)
|
||||||
|
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
|
||||||
|
for round in range(self.Nr-1,0,-1):
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
|
||||||
|
InvMixColumns(self)
|
||||||
|
InvShiftRows(self)
|
||||||
|
InvSubBytes(self)
|
||||||
|
AddRoundKey(self, self.__expandedKey[0:self.Nb])
|
||||||
|
return self._toBString(self.state)
|
||||||
|
|
||||||
|
def _toBlock(self, bs):
|
||||||
|
""" Convert binary string to array of bytes, state[col][row]"""
|
||||||
|
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
|
||||||
|
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
|
||||||
|
|
||||||
|
def _toBString(self, block):
|
||||||
|
""" Convert block (array of bytes) to binary string """
|
||||||
|
l = []
|
||||||
|
for col in block:
|
||||||
|
for rowElement in col:
|
||||||
|
l.append(chr(rowElement))
|
||||||
|
return ''.join(l)
|
||||||
|
#-------------------------------------
|
||||||
|
""" Number of rounds Nr = NrTable[Nb][Nk]
|
||||||
|
|
||||||
|
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
|
||||||
|
------------------------------------- """
|
||||||
|
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
5: {4:11, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
6: {4:12, 5:12, 6:12, 7:13, 8:14},
|
||||||
|
7: {4:13, 5:13, 6:13, 7:13, 8:14},
|
||||||
|
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
|
||||||
|
#-------------------------------------
|
||||||
|
def keyExpansion(algInstance, keyString):
|
||||||
|
""" Expand a string of size keySize into a larger array """
|
||||||
|
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
|
||||||
|
key = [ord(byte) for byte in keyString] # convert string to list
|
||||||
|
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
|
||||||
|
for i in range(Nk,Nb*(Nr+1)):
|
||||||
|
temp = w[i-1] # a four byte column
|
||||||
|
if (i%Nk) == 0 :
|
||||||
|
temp = temp[1:]+[temp[0]] # RotWord(temp)
|
||||||
|
temp = [ Sbox[byte] for byte in temp ]
|
||||||
|
temp[0] ^= Rcon[i/Nk]
|
||||||
|
elif Nk > 6 and i%Nk == 4 :
|
||||||
|
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
|
||||||
|
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
|
||||||
|
return w
|
||||||
|
|
||||||
|
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
|
||||||
|
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
|
||||||
|
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def AddRoundKey(algInstance, keyBlock):
|
||||||
|
""" XOR the algorithm state with a block of key material """
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] ^= keyBlock[column][row]
|
||||||
|
#-------------------------------------
|
||||||
|
|
||||||
|
def SubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
def InvSubBytes(algInstance):
|
||||||
|
for column in range(algInstance.Nb):
|
||||||
|
for row in range(4):
|
||||||
|
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
|
||||||
|
|
||||||
|
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
|
||||||
|
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
|
||||||
|
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
|
||||||
|
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
|
||||||
|
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
|
||||||
|
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
|
||||||
|
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
|
||||||
|
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
|
||||||
|
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
|
||||||
|
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
|
||||||
|
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
|
||||||
|
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
|
||||||
|
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
|
||||||
|
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
|
||||||
|
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
|
||||||
|
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
|
||||||
|
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
|
||||||
|
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
|
||||||
|
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
|
||||||
|
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
|
||||||
|
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
|
||||||
|
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
|
||||||
|
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
|
||||||
|
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
|
||||||
|
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
|
||||||
|
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
|
||||||
|
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
|
||||||
|
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
|
||||||
|
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
|
||||||
|
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
|
||||||
|
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
|
||||||
|
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
|
||||||
|
|
||||||
|
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
|
||||||
|
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
|
||||||
|
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
|
||||||
|
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
|
||||||
|
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
|
||||||
|
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
|
||||||
|
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
|
||||||
|
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
|
||||||
|
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
|
||||||
|
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
|
||||||
|
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
|
||||||
|
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
|
||||||
|
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
|
||||||
|
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
|
||||||
|
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
|
||||||
|
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
|
||||||
|
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
|
||||||
|
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
|
||||||
|
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
|
||||||
|
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
|
||||||
|
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
|
||||||
|
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
|
||||||
|
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
|
||||||
|
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
|
||||||
|
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
|
||||||
|
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
|
||||||
|
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
|
||||||
|
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
|
||||||
|
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
|
||||||
|
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
|
||||||
|
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
|
||||||
|
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
""" For each block size (Nb), the ShiftRow operation shifts row i
|
||||||
|
by the amount Ci. Note that row 0 is not shifted.
|
||||||
|
Nb C1 C2 C3
|
||||||
|
------------------- """
|
||||||
|
shiftOffset = { 4 : ( 0, 1, 2, 3),
|
||||||
|
5 : ( 0, 1, 2, 3),
|
||||||
|
6 : ( 0, 1, 2, 3),
|
||||||
|
7 : ( 0, 1, 2, 4),
|
||||||
|
8 : ( 0, 1, 3, 4) }
|
||||||
|
def ShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
def InvShiftRows(algInstance):
|
||||||
|
tmp = [0]*algInstance.Nb # list of size Nb
|
||||||
|
for r in range(1,4): # row 0 reamains unchanged and can be skipped
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
|
||||||
|
for c in range(algInstance.Nb):
|
||||||
|
algInstance.state[c][r] = tmp[c]
|
||||||
|
#-------------------------------------
|
||||||
|
def MixColumns(a):
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
|
||||||
|
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
|
||||||
|
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
def InvMixColumns(a):
|
||||||
|
""" Mix the four bytes of every column in a linear way
|
||||||
|
This is the opposite operation of Mixcolumn """
|
||||||
|
Sprime = [0,0,0,0]
|
||||||
|
for j in range(a.Nb): # for each column
|
||||||
|
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
|
||||||
|
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
|
||||||
|
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
|
||||||
|
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
|
||||||
|
for i in range(4):
|
||||||
|
a.state[j][i] = Sprime[i]
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def mul(a, b):
|
||||||
|
""" Multiply two elements of GF(2^m)
|
||||||
|
needed for MixColumn and InvMixColumn """
|
||||||
|
if (a !=0 and b!=0):
|
||||||
|
return Alogtable[(Logtable[a] + Logtable[b])%255]
|
||||||
|
else:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
|
||||||
|
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
|
||||||
|
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
|
||||||
|
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
|
||||||
|
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
|
||||||
|
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
|
||||||
|
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
|
||||||
|
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
|
||||||
|
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
|
||||||
|
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
|
||||||
|
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
|
||||||
|
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
|
||||||
|
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
|
||||||
|
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
|
||||||
|
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
|
||||||
|
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
|
||||||
|
|
||||||
|
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
|
||||||
|
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
|
||||||
|
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
|
||||||
|
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
|
||||||
|
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
|
||||||
|
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
|
||||||
|
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
|
||||||
|
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
|
||||||
|
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
|
||||||
|
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
|
||||||
|
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
|
||||||
|
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
|
||||||
|
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
|
||||||
|
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
|
||||||
|
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
|
||||||
|
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES Encryption Algorithm
|
||||||
|
The AES algorithm is just Rijndael algorithm restricted to the default
|
||||||
|
blockSize of 128 bits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES(Rijndael):
|
||||||
|
""" The AES algorithm is the Rijndael block cipher restricted to block
|
||||||
|
sizes of 128 bits and key sizes of 128, 192 or 256 bits
|
||||||
|
"""
|
||||||
|
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
|
||||||
|
""" Initialize AES, keySize is in bytes """
|
||||||
|
if not (keySize == 16 or keySize == 24 or keySize == 32) :
|
||||||
|
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
|
||||||
|
|
||||||
|
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
|
||||||
|
|
||||||
|
self.name = 'AES'
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CBC mode of encryption for block ciphers.
|
||||||
|
This algorithm mode wraps any BlockCipher to make a
|
||||||
|
Cipher Block Chaining mode.
|
||||||
|
"""
|
||||||
|
from random import Random # should change to crypto.random!!!
|
||||||
|
|
||||||
|
|
||||||
|
class CBC(BlockCipher):
|
||||||
|
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
|
||||||
|
algorithms. The initialization (IV) is automatic if set to None. Padding
|
||||||
|
is also automatic based on the Pad class used to initialize the algorithm
|
||||||
|
"""
|
||||||
|
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
|
||||||
|
""" CBC algorithms are created by initializing with a BlockCipher instance """
|
||||||
|
self.baseCipher = blockCipherInstance
|
||||||
|
self.name = self.baseCipher.name + '_CBC'
|
||||||
|
self.blockSize = self.baseCipher.blockSize
|
||||||
|
self.keySize = self.baseCipher.keySize
|
||||||
|
self.padding = padding
|
||||||
|
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
|
||||||
|
self.r = Random() # for IV generation, currently uses
|
||||||
|
# mediocre standard distro version <----------------
|
||||||
|
import time
|
||||||
|
newSeed = time.ctime()+str(self.r) # seed with instance location
|
||||||
|
self.r.seed(newSeed) # to make unique
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def setKey(self, key):
|
||||||
|
self.baseCipher.setKey(key)
|
||||||
|
|
||||||
|
# Overload to reset both CBC state and the wrapped baseCipher
|
||||||
|
def resetEncrypt(self):
|
||||||
|
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
|
||||||
|
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
|
||||||
|
|
||||||
|
def resetDecrypt(self):
|
||||||
|
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
|
||||||
|
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
|
||||||
|
|
||||||
|
def encrypt(self, plainText, iv=None, more=None):
|
||||||
|
""" CBC encryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to encrypt'
|
||||||
|
|
||||||
|
return BlockCipher.encrypt(self,plainText, more=more)
|
||||||
|
|
||||||
|
def decrypt(self, cipherText, iv=None, more=None):
|
||||||
|
""" CBC decryption - overloads baseCipher to allow optional explicit IV
|
||||||
|
when iv=None, iv is auto generated!
|
||||||
|
"""
|
||||||
|
if self.decryptBlockCount == 0:
|
||||||
|
self.iv = iv
|
||||||
|
else:
|
||||||
|
assert(iv==None), 'IV used only on first call to decrypt'
|
||||||
|
|
||||||
|
return BlockCipher.decrypt(self, cipherText, more=more)
|
||||||
|
|
||||||
|
def encryptBlock(self, plainTextBlock):
|
||||||
|
""" CBC block encryption, IV is set with 'encrypt' """
|
||||||
|
auto_IV = ''
|
||||||
|
if self.encryptBlockCount == 0:
|
||||||
|
if self.iv == None:
|
||||||
|
# generate IV and use
|
||||||
|
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
|
||||||
|
else: # application provided IV
|
||||||
|
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
|
||||||
|
self.prior_encr_CT_block = self.iv
|
||||||
|
""" encrypt the prior CT XORed with the PT """
|
||||||
|
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
|
||||||
|
self.prior_encr_CT_block = ct
|
||||||
|
return auto_IV+ct
|
||||||
|
|
||||||
|
def decryptBlock(self, encryptedBlock):
|
||||||
|
""" Decrypt a single block """
|
||||||
|
|
||||||
|
if self.decryptBlockCount == 0: # first call, process IV
|
||||||
|
if self.iv == None: # auto decrypt IV?
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
return ''
|
||||||
|
else:
|
||||||
|
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
|
||||||
|
self.prior_CT_block = self.iv
|
||||||
|
|
||||||
|
dct = self.baseCipher.decryptBlock(encryptedBlock)
|
||||||
|
""" XOR the prior decrypted CT with the prior CT """
|
||||||
|
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
|
||||||
|
|
||||||
|
self.prior_CT_block = encryptedBlock
|
||||||
|
|
||||||
|
return dct_XOR_priorCT
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES_CBC Encryption Algorithm
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES_CBC(CBC):
|
||||||
|
""" AES encryption in CBC feedback mode """
|
||||||
|
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
|
||||||
|
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
|
||||||
|
self.name = 'AES_CBC'
|
||||||
@@ -0,0 +1,290 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines libalfcrypto
|
||||||
|
def _load_libalfcrypto():
|
||||||
|
import ctypes
|
||||||
|
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
|
||||||
|
|
||||||
|
pointer_size = ctypes.sizeof(ctypes.c_voidp)
|
||||||
|
name_of_lib = None
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
name_of_lib = 'libalfcrypto.dylib'
|
||||||
|
elif sys.platform.startswith('win'):
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'alfcrypto.dll'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'alfcrypto64.dll'
|
||||||
|
else:
|
||||||
|
if pointer_size == 4:
|
||||||
|
name_of_lib = 'libalfcrypto32.so'
|
||||||
|
else:
|
||||||
|
name_of_lib = 'libalfcrypto64.so'
|
||||||
|
|
||||||
|
libalfcrypto = sys.path[0] + os.sep + name_of_lib
|
||||||
|
|
||||||
|
if not os.path.isfile(libalfcrypto):
|
||||||
|
raise Exception('libalfcrypto not found')
|
||||||
|
|
||||||
|
libalfcrypto = CDLL(libalfcrypto)
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libalfcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
# aes cbc decryption
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
#
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key,
|
||||||
|
# unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
|
||||||
|
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pukall 1 Cipher
|
||||||
|
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
|
||||||
|
# unsigned char *dest, unsigned int len, int decryption);
|
||||||
|
|
||||||
|
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
|
||||||
|
|
||||||
|
# Topaz Encryption
|
||||||
|
# typedef struct _TpzCtx {
|
||||||
|
# unsigned int v[2];
|
||||||
|
# } TpzCtx;
|
||||||
|
#
|
||||||
|
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
|
||||||
|
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
|
||||||
|
|
||||||
|
class TPZ_CTX(Structure):
|
||||||
|
_fields_ = [('v', c_long * 2)]
|
||||||
|
|
||||||
|
TPZ_CTX_p = POINTER(TPZ_CTX)
|
||||||
|
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
|
||||||
|
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
|
||||||
|
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._blocksize = 0
|
||||||
|
self._keyctx = None
|
||||||
|
self._iv = 0
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise Exception('AES CBC improper key used')
|
||||||
|
return
|
||||||
|
keyctx = self._keyctx = AES_KEY()
|
||||||
|
self._iv = iv
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
|
if rv < 0:
|
||||||
|
raise Exception('Failed to initialize AES CBC key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise Exception('AES CBC decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
self.key = key
|
||||||
|
out = create_string_buffer(len(src))
|
||||||
|
de = 0
|
||||||
|
if decryption:
|
||||||
|
de = 1
|
||||||
|
rv = PC1(key, len(key), src, out, len(src), de)
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
tpz_ctx = self._ctx = TPZ_CTX()
|
||||||
|
topazCryptoInit(tpz_ctx, key, len(key))
|
||||||
|
return tpz_ctx
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
topazCryptoDecrypt(ctx, data, out, len(data))
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
print "Using Library AlfCrypto DLL/DYLIB/SO"
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
|
||||||
|
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
class Pukall_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self.key = None
|
||||||
|
|
||||||
|
def PC1(self, key, src, decryption=True):
|
||||||
|
sum1 = 0;
|
||||||
|
sum2 = 0;
|
||||||
|
keyXorVal = 0;
|
||||||
|
if len(key)!=16:
|
||||||
|
print "Bad key length!"
|
||||||
|
return None
|
||||||
|
wkey = []
|
||||||
|
for i in xrange(8):
|
||||||
|
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
|
dst = ""
|
||||||
|
for i in xrange(len(src)):
|
||||||
|
temp1 = 0;
|
||||||
|
byteXorVal = 0;
|
||||||
|
for j in xrange(8):
|
||||||
|
temp1 ^= wkey[j]
|
||||||
|
sum2 = (sum2+j)*20021 + sum1
|
||||||
|
sum1 = (temp1*346)&0xFFFF
|
||||||
|
sum2 = (sum2+sum1)&0xFFFF
|
||||||
|
temp1 = (temp1*20021+1)&0xFFFF
|
||||||
|
byteXorVal ^= temp1 ^ sum2
|
||||||
|
curByte = ord(src[i])
|
||||||
|
if not decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
|
if decryption:
|
||||||
|
keyXorVal = curByte * 257;
|
||||||
|
for j in xrange(8):
|
||||||
|
wkey[j] ^= keyXorVal;
|
||||||
|
dst+=chr(curByte)
|
||||||
|
return dst
|
||||||
|
|
||||||
|
class Topaz_Cipher(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._ctx = None
|
||||||
|
|
||||||
|
def ctx_init(self, key):
|
||||||
|
ctx1 = 0x0CAFFE19E
|
||||||
|
for keyChar in key:
|
||||||
|
keyByte = ord(keyChar)
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
self._ctx = [ctx1, ctx2]
|
||||||
|
return [ctx1,ctx2]
|
||||||
|
|
||||||
|
def decrypt(self, data, ctx=None):
|
||||||
|
if ctx == None:
|
||||||
|
ctx = self._ctx
|
||||||
|
ctx1 = ctx[0]
|
||||||
|
ctx2 = ctx[1]
|
||||||
|
plainText = ""
|
||||||
|
for dataChar in data:
|
||||||
|
dataByte = ord(dataChar)
|
||||||
|
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
|
ctx2 = ctx1
|
||||||
|
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
|
plainText += chr(m)
|
||||||
|
return plainText
|
||||||
|
|
||||||
|
class AES_CBC(object):
|
||||||
|
def __init__(self):
|
||||||
|
self._key = None
|
||||||
|
self._iv = None
|
||||||
|
self.aes = None
|
||||||
|
|
||||||
|
def set_decrypt_key(self, userkey, iv):
|
||||||
|
self._key = userkey
|
||||||
|
self._iv = iv
|
||||||
|
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
iv = self._iv
|
||||||
|
cleartext = self.aes.decrypt(iv + data)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
|
||||||
|
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
|
try:
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
|
||||||
|
break
|
||||||
|
except (ImportError, Exception):
|
||||||
|
pass
|
||||||
|
return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
|
||||||
|
# this only exists in openssl so we will use pure python implementation instead
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
def pbkdf2(self, passwd, salt, iter, keylen):
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise Exception("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
return T
|
||||||
|
|
||||||
|
sha = hashlib.sha1
|
||||||
|
digest_size = sha().digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
h = hmac.new( passwd, None, sha )
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, iter, i )
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
|
||||||
@@ -20,6 +20,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# Get a 7 bit encoded number from string. The most
|
# Get a 7 bit encoded number from string. The most
|
||||||
# significant byte comes first and has the high bit (8th) set
|
# significant byte comes first and has the high bit (8th) set
|
||||||
@@ -32,11 +34,11 @@ def readEncodedNumber(file):
|
|||||||
data = ord(c)
|
data = ord(c)
|
||||||
|
|
||||||
if data == 0xFF:
|
if data == 0xFF:
|
||||||
flag = True
|
flag = True
|
||||||
c = file.read(1)
|
c = file.read(1)
|
||||||
if (len(c) == 0):
|
if (len(c) == 0):
|
||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
|
|
||||||
if data >= 0x80:
|
if data >= 0x80:
|
||||||
datax = (data & 0x7F)
|
datax = (data & 0x7F)
|
||||||
@@ -49,7 +51,7 @@ def readEncodedNumber(file):
|
|||||||
data = datax
|
data = datax
|
||||||
|
|
||||||
if flag:
|
if flag:
|
||||||
data = -data
|
data = -data
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
@@ -57,29 +59,29 @@ def readEncodedNumber(file):
|
|||||||
# most significant byte first which has the high bit set
|
# most significant byte first which has the high bit set
|
||||||
|
|
||||||
def encodeNumber(number):
|
def encodeNumber(number):
|
||||||
result = ""
|
result = ""
|
||||||
negative = False
|
negative = False
|
||||||
flag = 0
|
flag = 0
|
||||||
|
|
||||||
if number < 0 :
|
if number < 0 :
|
||||||
number = -number + 1
|
number = -number + 1
|
||||||
negative = True
|
negative = True
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
byte = number & 0x7F
|
byte = number & 0x7F
|
||||||
number = number >> 7
|
number = number >> 7
|
||||||
byte += flag
|
byte += flag
|
||||||
result += chr(byte)
|
result += chr(byte)
|
||||||
flag = 0x80
|
flag = 0x80
|
||||||
if number == 0 :
|
if number == 0 :
|
||||||
if (byte == 0xFF and negative == False) :
|
if (byte == 0xFF and negative == False) :
|
||||||
result += chr(0x80)
|
result += chr(0x80)
|
||||||
break
|
break
|
||||||
|
|
||||||
if negative:
|
if negative:
|
||||||
result += chr(0xFF)
|
result += chr(0xFF)
|
||||||
|
|
||||||
return result[::-1]
|
return result[::-1]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -138,7 +140,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
@@ -235,6 +238,7 @@ class PageParser(object):
|
|||||||
|
|
||||||
'group' : (1, 'snippets', 1, 0),
|
'group' : (1, 'snippets', 1, 0),
|
||||||
'group.type' : (1, 'scalar_text', 0, 0),
|
'group.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'group._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
'region' : (1, 'snippets', 1, 0),
|
'region' : (1, 'snippets', 1, 0),
|
||||||
'region.type' : (1, 'scalar_text', 0, 0),
|
'region.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -257,6 +261,13 @@ class PageParser(object):
|
|||||||
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
'word_semantic' : (1, 'snippets', 1, 1),
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -271,11 +282,21 @@ class PageParser(object):
|
|||||||
|
|
||||||
'_span' : (1, 'snippets', 1, 0),
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'-span.lastWord' : (1, 'scalar_number', 0, 0),
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'span' : (1, 'snippets', 1, 0),
|
'span' : (1, 'snippets', 1, 0),
|
||||||
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'extratokens' : (1, 'snippets', 1, 0),
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -618,7 +639,7 @@ class PageParser(object):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
# flatten tag
|
# flatten tag
|
||||||
def flattenTag(self, node):
|
def flattenTag(self, node):
|
||||||
name = node[0]
|
name = node[0]
|
||||||
subtagList = node[1]
|
subtagList = node[1]
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540
|
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
|
||||||
{\fonttbl}
|
{\fonttbl}
|
||||||
{\colortbl;\red255\green255\blue255;}
|
{\colortbl;\red255\green255\blue255;}
|
||||||
}
|
}
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 362 B After Width: | Height: | Size: 362 B |
@@ -37,10 +37,9 @@ def cli_main(argv=sys.argv):
|
|||||||
keypath = argv[1]
|
keypath = argv[1]
|
||||||
with open(keypath, 'rb') as f:
|
with open(keypath, 'rb') as f:
|
||||||
keyder = f.read()
|
keyder = f.read()
|
||||||
print keyder.encode('base64')
|
print keyder.encode('base64')
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.exit(cli_main())
|
sys.exit(cli_main())
|
||||||
|
|
||||||
@@ -57,8 +57,13 @@
|
|||||||
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
||||||
# 0.17 - added support for pycrypto's DES as well
|
# 0.17 - added support for pycrypto's DES as well
|
||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 0.19 - Modify the interface to allow use of import
|
||||||
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.18'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -70,32 +75,50 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
|
||||||
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
Des = None
|
Des = None
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
# first try with pycrypto
|
# first try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# they try with openssl
|
# they try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
else:
|
else:
|
||||||
# first try with openssl
|
# first try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# then try with pycrypto
|
# then try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
|
|
||||||
# if that did not work then use pure python implementation
|
# if that did not work then use pure python implementation
|
||||||
# of DES and try to speed it up with Psycho
|
# of DES and try to speed it up with Psycho
|
||||||
if Des == None:
|
if Des == None:
|
||||||
import python_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import python_des
|
||||||
|
else:
|
||||||
|
import python_des
|
||||||
Des = python_des.Des
|
Des = python_des.Des
|
||||||
# Import Psyco if available
|
# Import Psyco if available
|
||||||
try:
|
try:
|
||||||
@@ -111,19 +134,27 @@ except ImportError:
|
|||||||
# older Python release
|
# older Python release
|
||||||
import sha
|
import sha
|
||||||
sha1 = lambda s: sha.new(s)
|
sha1 = lambda s: sha.new(s)
|
||||||
|
|
||||||
import cgi
|
import cgi
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logging.basicConfig()
|
logging.basicConfig()
|
||||||
#logging.basicConfig(level=logging.DEBUG)
|
#logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
raise ValueError('Invalid file format')
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
||||||
@@ -147,7 +178,7 @@ def sanitizeFileName(s):
|
|||||||
def fixKey(key):
|
def fixKey(key):
|
||||||
def fixByte(b):
|
def fixByte(b):
|
||||||
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
|
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
|
||||||
return "".join([chr(fixByte(ord(a))) for a in key])
|
return "".join([chr(fixByte(ord(a))) for a in key])
|
||||||
|
|
||||||
def deXOR(text, sp, table):
|
def deXOR(text, sp, table):
|
||||||
r=''
|
r=''
|
||||||
@@ -160,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -181,7 +212,7 @@ class EreaderProcessor(object):
|
|||||||
for i in xrange(len(data)):
|
for i in xrange(len(data)):
|
||||||
j = (j + shuf) % len(data)
|
j = (j + shuf) % len(data)
|
||||||
r[j] = data[i]
|
r[j] = data[i]
|
||||||
assert len("".join(r)) == len(data)
|
assert len("".join(r)) == len(data)
|
||||||
return "".join(r)
|
return "".join(r)
|
||||||
r = unshuff(input[0:-8], cookie_shuf)
|
r = unshuff(input[0:-8], cookie_shuf)
|
||||||
|
|
||||||
@@ -197,11 +228,17 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
if (sect.bkType == "Book"):
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
||||||
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
||||||
@@ -217,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -245,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
encrypted_key = r[44:44+8]
|
if drm_sub_version == 13:
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key = r[44:44+8]
|
||||||
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -334,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -346,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -354,20 +399,20 @@ class EreaderProcessor(object):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
def cleanPML(pml):
|
def cleanPML(pml):
|
||||||
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
|
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
|
||||||
pml2 = pml
|
pml2 = pml
|
||||||
for k in xrange(128,256):
|
for k in xrange(128,256):
|
||||||
badChar = chr(k)
|
badChar = chr(k)
|
||||||
pml2 = pml2.replace(badChar, '\\a%03d' % k)
|
pml2 = pml2.replace(badChar, '\\a%03d' % k)
|
||||||
return pml2
|
return pml2
|
||||||
|
|
||||||
def convertEreaderToPml(infile, name, cc, outdir):
|
def convertEreaderToPml(infile, name, cc, outdir):
|
||||||
if not os.path.exists(outdir):
|
if not os.path.exists(outdir):
|
||||||
os.makedirs(outdir)
|
os.makedirs(outdir)
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -390,6 +435,47 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, name, cc, make_pmlz):
|
||||||
|
if make_pmlz :
|
||||||
|
# ignore specified outdir, use tempdir instead
|
||||||
|
outdir = tempfile.mkdtemp()
|
||||||
|
try:
|
||||||
|
print "Processing..."
|
||||||
|
convertEreaderToPml(infile, name, cc, outdir)
|
||||||
|
if make_pmlz :
|
||||||
|
import zipfile
|
||||||
|
import shutil
|
||||||
|
print " Creating PMLZ file"
|
||||||
|
zipname = infile[:-4] + '.pmlz'
|
||||||
|
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
||||||
|
list = os.listdir(outdir)
|
||||||
|
for file in list:
|
||||||
|
localname = file
|
||||||
|
filePath = os.path.join(outdir,file)
|
||||||
|
if os.path.isfile(filePath):
|
||||||
|
myZipFile.write(filePath, localname)
|
||||||
|
elif os.path.isdir(filePath):
|
||||||
|
imageList = os.listdir(filePath)
|
||||||
|
localimgdir = os.path.basename(filePath)
|
||||||
|
for image in imageList:
|
||||||
|
localname = os.path.join(localimgdir,image)
|
||||||
|
imagePath = os.path.join(filePath,image)
|
||||||
|
if os.path.isfile(imagePath):
|
||||||
|
myZipFile.write(imagePath, localname)
|
||||||
|
myZipFile.close()
|
||||||
|
# remove temporary directory
|
||||||
|
shutil.rmtree(outdir, True)
|
||||||
|
print 'output is %s' % zipname
|
||||||
|
else :
|
||||||
|
print 'output in %s' % outdir
|
||||||
|
print "done"
|
||||||
|
except ValueError, e:
|
||||||
|
print "Error: %s" % e
|
||||||
|
return 1
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
print "Converts DRMed eReader books to PML Source"
|
print "Converts DRMed eReader books to PML Source"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
@@ -404,8 +490,8 @@ def usage():
|
|||||||
print " It's enough to enter the last 8 digits of the credit card number"
|
print " It's enough to enter the last 8 digits of the credit card number"
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
global bookname
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -413,76 +499,28 @@ def main(argv=None):
|
|||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
make_pmlz = False
|
make_pmlz = False
|
||||||
zipname = None
|
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o == "-h":
|
if o == "-h":
|
||||||
usage()
|
usage()
|
||||||
return 0
|
return 0
|
||||||
elif o == "--make-pmlz":
|
elif o == "--make-pmlz":
|
||||||
make_pmlz = True
|
make_pmlz = True
|
||||||
zipname = ''
|
|
||||||
|
|
||||||
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
||||||
|
|
||||||
if len(args)!=3 and len(args)!=4:
|
if len(args)!=3 and len(args)!=4:
|
||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
else:
|
|
||||||
if len(args)==3:
|
|
||||||
infile, name, cc = args[0], args[1], args[2]
|
|
||||||
outdir = infile[:-4] + '_Source'
|
|
||||||
elif len(args)==4:
|
|
||||||
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
|
||||||
|
|
||||||
if make_pmlz :
|
if len(args)==3:
|
||||||
# ignore specified outdir, use tempdir instead
|
infile, name, cc = args[0], args[1], args[2]
|
||||||
outdir = tempfile.mkdtemp()
|
outdir = infile[:-4] + '_Source'
|
||||||
|
elif len(args)==4:
|
||||||
|
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
return decryptBook(infile, outdir, name, cc, make_pmlz)
|
||||||
|
|
||||||
try:
|
|
||||||
print "Processing..."
|
|
||||||
import time
|
|
||||||
start_time = time.time()
|
|
||||||
convertEreaderToPml(infile, name, cc, outdir)
|
|
||||||
|
|
||||||
if make_pmlz :
|
|
||||||
import zipfile
|
|
||||||
import shutil
|
|
||||||
print " Creating PMLZ file"
|
|
||||||
zipname = infile[:-4] + '.pmlz'
|
|
||||||
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
|
||||||
list = os.listdir(outdir)
|
|
||||||
for file in list:
|
|
||||||
localname = file
|
|
||||||
filePath = os.path.join(outdir,file)
|
|
||||||
if os.path.isfile(filePath):
|
|
||||||
myZipFile.write(filePath, localname)
|
|
||||||
elif os.path.isdir(filePath):
|
|
||||||
imageList = os.listdir(filePath)
|
|
||||||
localimgdir = os.path.basename(filePath)
|
|
||||||
for image in imageList:
|
|
||||||
localname = os.path.join(localimgdir,image)
|
|
||||||
imagePath = os.path.join(filePath,image)
|
|
||||||
if os.path.isfile(imagePath):
|
|
||||||
myZipFile.write(imagePath, localname)
|
|
||||||
myZipFile.close()
|
|
||||||
# remove temporary directory
|
|
||||||
shutil.rmtree(outdir, True)
|
|
||||||
|
|
||||||
end_time = time.time()
|
|
||||||
search_time = end_time - start_time
|
|
||||||
print 'elapsed time: %.2f seconds' % (search_time, )
|
|
||||||
if make_pmlz :
|
|
||||||
print 'output is %s' % zipname
|
|
||||||
else :
|
|
||||||
print 'output in %s' % outdir
|
|
||||||
print "done"
|
|
||||||
except ValueError, e:
|
|
||||||
print "Error: %s" % e
|
|
||||||
return 1
|
|
||||||
return 0
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ class DocParser(object):
|
|||||||
ys = []
|
ys = []
|
||||||
gdefs = []
|
gdefs = []
|
||||||
|
|
||||||
# get path defintions, positions, dimensions for ecah glyph
|
# get path defintions, positions, dimensions for each glyph
|
||||||
# that makes up the image, and find min x and min y to reposition origin
|
# that makes up the image, and find min x and min y to reposition origin
|
||||||
minx = -1
|
minx = -1
|
||||||
miny = -1
|
miny = -1
|
||||||
@@ -271,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -280,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -288,6 +292,11 @@ class DocParser(object):
|
|||||||
if self.fixedimage :
|
if self.fixedimage :
|
||||||
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
||||||
|
|
||||||
|
# before creating an image make sure glyph info exists
|
||||||
|
gidList = self.getData('info.glyph.glyphID',0,-1)
|
||||||
|
|
||||||
|
makeImage = makeImage & (len(gidList) > 0)
|
||||||
|
|
||||||
if not makeImage :
|
if not makeImage :
|
||||||
# standard all word paragraph
|
# standard all word paragraph
|
||||||
for wordnum in xrange(first, last):
|
for wordnum in xrange(first, last):
|
||||||
@@ -305,6 +314,15 @@ class DocParser(object):
|
|||||||
lastGlyph = firstglyphList[last]
|
lastGlyph = firstglyphList[last]
|
||||||
else :
|
else :
|
||||||
lastGlyph = len(gidList)
|
lastGlyph = len(gidList)
|
||||||
|
|
||||||
|
# handle case of white sapce paragraphs with no actual glyphs in them
|
||||||
|
# by reverting to text based paragraph
|
||||||
|
if firstGlyph >= lastGlyph:
|
||||||
|
# revert to standard text based paragraph
|
||||||
|
for wordnum in xrange(first, last):
|
||||||
|
result.append(('ocr', wordnum))
|
||||||
|
return pclass, result
|
||||||
|
|
||||||
for glyphnum in xrange(firstGlyph, lastGlyph):
|
for glyphnum in xrange(firstGlyph, lastGlyph):
|
||||||
glyphList.append(glyphnum)
|
glyphList.append(glyphnum)
|
||||||
# include any extratokens if they exist
|
# include any extratokens if they exist
|
||||||
@@ -344,6 +362,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -367,10 +387,10 @@ class DocParser(object):
|
|||||||
ws_last = int(argres)
|
ws_last = int(argres)
|
||||||
|
|
||||||
elif name.endswith('word.class'):
|
elif name.endswith('word.class'):
|
||||||
(cname, space) = argres.split('-',1)
|
(cname, space) = argres.split('-',1)
|
||||||
if space == '' : space = '0'
|
if space == '' : space = '0'
|
||||||
if (cname == 'spaceafter') and (int(space) > 0) :
|
if (cname == 'spaceafter') and (int(space) > 0) :
|
||||||
word_class = 'sa'
|
word_class = 'sa'
|
||||||
|
|
||||||
elif name.endswith('word.img.src'):
|
elif name.endswith('word.img.src'):
|
||||||
result.append(('img' + word_class, int(argres)))
|
result.append(('img' + word_class, int(argres)))
|
||||||
@@ -503,6 +523,72 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
@@ -510,6 +596,7 @@ class DocParser(object):
|
|||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
htmlpage = ''
|
htmlpage = ''
|
||||||
|
tocinfo = ''
|
||||||
|
|
||||||
# get the ocr text
|
# get the ocr text
|
||||||
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
||||||
@@ -635,9 +722,9 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
|
tocinfo += self.buildTOCEntry(pdesc)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
||||||
|
|
||||||
|
|
||||||
elif (regtype == 'vertical') or (regtype == 'table') :
|
elif (regtype == 'vertical') or (regtype == 'table') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
if inGroup:
|
if inGroup:
|
||||||
@@ -695,12 +782,11 @@ class DocParser(object):
|
|||||||
htmlpage = htmlpage[0:-4]
|
htmlpage = htmlpage[0:-4]
|
||||||
last_para_continued = False
|
last_para_continued = False
|
||||||
|
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
||||||
htmlpage = dp.process()
|
htmlpage, tocinfo = dp.process()
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|||||||
@@ -10,17 +10,94 @@ from struct import unpack
|
|||||||
|
|
||||||
|
|
||||||
class PParser(object):
|
class PParser(object):
|
||||||
def __init__(self, gd, flatxml):
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
self.gd = gd
|
self.gd = gd
|
||||||
self.flatdoc = flatxml.split('\n')
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
self.temp = []
|
self.temp = []
|
||||||
foo = self.getData('page.h') or self.getData('book.h')
|
|
||||||
self.ph = foo[0]
|
self.ph = -1
|
||||||
foo = self.getData('page.w') or self.getData('book.w')
|
self.pw = -1
|
||||||
self.pw = foo[0]
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
self.gx = self.getData('info.glyph.x')
|
for p in startpos:
|
||||||
self.gy = self.getData('info.glyph.y')
|
(name, argres) = self.lineinDoc(p)
|
||||||
self.gid = self.getData('info.glyph.glyphID')
|
self.ph = max(self.ph, int(argres))
|
||||||
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.pw = max(self.pw, int(argres))
|
||||||
|
|
||||||
|
if self.ph <= 0:
|
||||||
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
|
if self.pw <= 0:
|
||||||
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gx = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gy = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gid = res
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
|
# find tag in doc if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
|
else:
|
||||||
|
end = min(self.docSize, end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
|
||||||
|
# return list of start positions for the tagpath
|
||||||
|
def posinDoc(self, tagpath):
|
||||||
|
startpos = []
|
||||||
|
pos = 0
|
||||||
|
res = ""
|
||||||
|
while res != None :
|
||||||
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
|
if res != None :
|
||||||
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
def getData(self, path):
|
def getData(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.flatdoc)
|
cnt = len(self.flatdoc)
|
||||||
@@ -39,6 +116,23 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def getDataatPos(self, path, pos):
|
||||||
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
def getDataTemp(self, path):
|
def getDataTemp(self, path):
|
||||||
result = None
|
result = None
|
||||||
cnt = len(self.temp)
|
cnt = len(self.temp)
|
||||||
@@ -58,6 +152,7 @@ class PParser(object):
|
|||||||
for j in xrange(0,len(argres)):
|
for j in xrange(0,len(argres)):
|
||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getImages(self):
|
def getImages(self):
|
||||||
result = []
|
result = []
|
||||||
self.temp = self.flatdoc
|
self.temp = self.flatdoc
|
||||||
@@ -69,6 +164,7 @@ class PParser(object):
|
|||||||
src = self.getDataTemp('img.src')[0]
|
src = self.getDataTemp('img.src')[0]
|
||||||
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def getGlyphs(self):
|
def getGlyphs(self):
|
||||||
result = []
|
result = []
|
||||||
if (self.gid != None) and (len(self.gid) > 0):
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
@@ -84,25 +180,25 @@ class PParser(object):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi):
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
ml = ''
|
ml = ''
|
||||||
pp = PParser(gdict, flat_xml)
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
ml += '<?xml version="1.0" standalone="no"?>\n'
|
ml += '<?xml version="1.0" standalone="no"?>\n'
|
||||||
if (raw):
|
if (raw):
|
||||||
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
|
||||||
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
|
||||||
else:
|
else:
|
||||||
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
||||||
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
|
||||||
ml += '<title>Page %d - %s by %s</title>\n' % (counter, meta_array['Title'],meta_array['Authors'])
|
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
|
||||||
ml += '<script><![CDATA[\n'
|
ml += '<script><![CDATA[\n'
|
||||||
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
|
||||||
ml += 'var dpi=%d;\n' % scaledpi
|
ml += 'var dpi=%d;\n' % scaledpi
|
||||||
if (counter) :
|
if (previd) :
|
||||||
ml += 'var prevpage="page%04d.xhtml";\n' % (counter - 1)
|
ml += 'var prevpage="page%04d.xhtml";\n' % (previd)
|
||||||
if (counter < numfiles-1) :
|
if (nextid) :
|
||||||
ml += 'var nextpage="page%04d.xhtml";\n' % (counter + 1)
|
ml += 'var nextpage="page%04d.xhtml";\n' % (nextid)
|
||||||
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
|
||||||
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
|
||||||
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
|
||||||
@@ -115,10 +211,11 @@ def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, sca
|
|||||||
ml += '</head>\n'
|
ml += '</head>\n'
|
||||||
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
|
||||||
ml += '<div style="white-space:nowrap;">\n'
|
ml += '<div style="white-space:nowrap;">\n'
|
||||||
if (counter == 0) :
|
if previd == None:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
||||||
else:
|
else:
|
||||||
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
|
||||||
|
|
||||||
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
|
||||||
if (pp.gid != None):
|
if (pp.gid != None):
|
||||||
ml += '<defs>\n'
|
ml += '<defs>\n'
|
||||||
@@ -134,12 +231,14 @@ def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, sca
|
|||||||
for j in xrange(0,len(pp.gid)):
|
for j in xrange(0,len(pp.gid)):
|
||||||
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
|
||||||
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
ml += '<text x="10" y="10" font-family="Helvetica" font-size="100" stroke="black">This page intentionally left blank.</text>\n<text x="10" y="110" font-family="Helvetica" font-size="50" stroke="black">Until this notice unintentionally gave it content. (gensvg.py)</text>\n'
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
ml += '<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n'
|
||||||
if (raw) :
|
if (raw) :
|
||||||
ml += '</svg>'
|
ml += '</svg>'
|
||||||
else :
|
else :
|
||||||
ml += '</svg></a>\n'
|
ml += '</svg></a>\n'
|
||||||
if (counter == numfiles - 1) :
|
if nextid == None:
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
|
||||||
else :
|
else :
|
||||||
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
|
||||||
@@ -148,4 +247,3 @@ def convert2SVG(gdict, flat_xml, counter, numfiles, svgDir, raw, meta_array, sca
|
|||||||
ml += '</body>\n'
|
ml += '</body>\n'
|
||||||
ml += '</html>\n'
|
ml += '</html>\n'
|
||||||
return ml
|
return ml
|
||||||
|
|
||||||
|
|||||||
@@ -19,12 +19,25 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
import convert2xml
|
if 'calibre' in sys.modules:
|
||||||
import flatxml2html
|
inCalibre = True
|
||||||
import flatxml2svg
|
else:
|
||||||
import stylexml2css
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre :
|
||||||
|
from calibre_plugins.k4mobidedrm import convert2xml
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2html
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2svg
|
||||||
|
from calibre_plugins.k4mobidedrm import stylexml2css
|
||||||
|
else :
|
||||||
|
import convert2xml
|
||||||
|
import flatxml2html
|
||||||
|
import flatxml2svg
|
||||||
|
import stylexml2css
|
||||||
|
|
||||||
|
|
||||||
# Get a 7 bit encoded number from a file
|
# Get a 7 bit encoded number from a file
|
||||||
@@ -35,11 +48,11 @@ def readEncodedNumber(file):
|
|||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
if data == 0xFF:
|
if data == 0xFF:
|
||||||
flag = True
|
flag = True
|
||||||
c = file.read(1)
|
c = file.read(1)
|
||||||
if (len(c) == 0):
|
if (len(c) == 0):
|
||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
if data >= 0x80:
|
if data >= 0x80:
|
||||||
datax = (data & 0x7F)
|
datax = (data & 0x7F)
|
||||||
while data >= 0x80 :
|
while data >= 0x80 :
|
||||||
@@ -50,7 +63,7 @@ def readEncodedNumber(file):
|
|||||||
datax = (datax <<7) + (data & 0x7F)
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
data = datax
|
data = datax
|
||||||
if flag:
|
if flag:
|
||||||
data = -data
|
data = -data
|
||||||
return data
|
return data
|
||||||
|
|
||||||
# Get a length prefixed string from the file
|
# Get a length prefixed string from the file
|
||||||
@@ -103,7 +116,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
def getPos(self):
|
def getPos(self):
|
||||||
@@ -192,6 +206,8 @@ class GParser(object):
|
|||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
def getGlyphDim(self, gly):
|
def getGlyphDim(self, gly):
|
||||||
|
if self.gdpi[gly] == 0:
|
||||||
|
return 0, 0
|
||||||
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
||||||
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
||||||
return maxh, maxw
|
return maxh, maxw
|
||||||
@@ -320,6 +336,18 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
print 'Processing Meta Data and creating OPF'
|
print 'Processing Meta Data and creating OPF'
|
||||||
meta_array = getMetaArray(metaFile)
|
meta_array = getMetaArray(metaFile)
|
||||||
|
|
||||||
|
# replace special chars in title and authors like & < >
|
||||||
|
title = meta_array.get('Title','No Title Provided')
|
||||||
|
title = title.replace('&','&')
|
||||||
|
title = title.replace('<','<')
|
||||||
|
title = title.replace('>','>')
|
||||||
|
meta_array['Title'] = title
|
||||||
|
authors = meta_array.get('Authors','No Authors Provided')
|
||||||
|
authors = authors.replace('&','&')
|
||||||
|
authors = authors.replace('<','<')
|
||||||
|
authors = authors.replace('>','>')
|
||||||
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
xname = os.path.join(xmlDir, 'metadata.xml')
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
metastr = ''
|
metastr = ''
|
||||||
for key in meta_array:
|
for key in meta_array:
|
||||||
@@ -346,10 +374,34 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
(ph, pw) = getPageDim(flat_xml)
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
if (ph == '-1') or (ph == '0') : ph = '11000'
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
if (pw == '-1') or (pw == '0') : pw = '8500'
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
# print ' ', 'other0000.dat'
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
xname = os.path.join(bookDir, 'style.css')
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
flat_xml = convert2xml.fromData(dict, otherFile)
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
file(xname, 'wb').write(cssstr)
|
file(xname, 'wb').write(cssstr)
|
||||||
xname = os.path.join(xmlDir, 'other0000.xml')
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
@@ -389,6 +441,9 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
glyfile.close()
|
glyfile.close()
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
# build up tocentries while processing html
|
||||||
|
tocentries = ''
|
||||||
|
|
||||||
# start up the html
|
# start up the html
|
||||||
htmlFileName = "book.html"
|
htmlFileName = "book.html"
|
||||||
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
@@ -399,8 +454,10 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
||||||
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
if 'ASIN' in meta_array:
|
||||||
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
||||||
htmlstr += '</head>\n<body>\n'
|
htmlstr += '</head>\n<body>\n'
|
||||||
|
|
||||||
@@ -409,6 +466,77 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
# readability when rendering to the screen.
|
# readability when rendering to the screen.
|
||||||
scaledpi = 1440.0
|
scaledpi = 1440.0
|
||||||
|
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
filenames = sorted(filenames)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
|
||||||
|
xmllst = []
|
||||||
|
|
||||||
|
for filename in filenames:
|
||||||
|
# print ' ', filename
|
||||||
|
print ".",
|
||||||
|
fname = os.path.join(pageDir,filename)
|
||||||
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
|
# keep flat_xml for later svg processing
|
||||||
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
|
# first get the html
|
||||||
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
tocentries += tocinfo
|
||||||
|
htmlstr += pagehtml
|
||||||
|
|
||||||
|
# finish up the html string and output it
|
||||||
|
htmlstr += '</body>\n</html>\n'
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tochtml = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
|
tochtml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
||||||
|
tochtml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
||||||
|
tochtml += '<head>\n'
|
||||||
|
tochtml += '<title>' + meta_array['Title'] + '</title>\n'
|
||||||
|
tochtml += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
|
tochtml += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tochtml += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tochtml += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
|
tochtml += '</head>\n'
|
||||||
|
tochtml += '<body>\n'
|
||||||
|
|
||||||
|
tochtml += '<h2>Table of Contents</h2>\n'
|
||||||
|
start = pageidnums[0]
|
||||||
|
if (raw):
|
||||||
|
startname = 'page%04d.svg' % start
|
||||||
|
else:
|
||||||
|
startname = 'page%04d.xhtml' % start
|
||||||
|
|
||||||
|
tochtml += '<h3><a href="' + startname + '">Start of Book</a></h3>\n'
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tochtml += '<h3><a href="'+ fname + '">' + title + '</a></h3>\n'
|
||||||
|
tochtml += '</body>\n'
|
||||||
|
tochtml += '</html>\n'
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
||||||
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
||||||
@@ -416,64 +544,61 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
||||||
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
||||||
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
||||||
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
if 'ASIN' in meta_array:
|
||||||
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
||||||
svgindex += '</head>\n'
|
svgindex += '</head>\n'
|
||||||
svgindex += '<body>\n'
|
svgindex += '<body>\n'
|
||||||
|
|
||||||
filenames = os.listdir(pageDir)
|
print "Building svg images of each book page"
|
||||||
filenames = sorted(filenames)
|
svgindex += '<h2>List of Pages</h2>\n'
|
||||||
numfiles = len(filenames)
|
svgindex += '<div>\n'
|
||||||
counter = 0
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
for filename in filenames:
|
cnt = len(idlst)
|
||||||
# print ' ', filename
|
previd = None
|
||||||
print ".",
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
fname = os.path.join(pageDir,filename)
|
if j < cnt - 1:
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
nextid = None
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
# first get the html
|
flat_svg = ''
|
||||||
htmlstr += flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
for page in pagelst:
|
||||||
|
flat_svg += xmllst[page]
|
||||||
# now get the svg image of the page
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
svgxml = flatxml2svg.convert2SVG(gd, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi)
|
|
||||||
|
|
||||||
if (raw) :
|
if (raw) :
|
||||||
pfile = open(os.path.join(svgDir,filename.replace('.dat','.svg')), 'w')
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (counter, counter)
|
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid)
|
||||||
else :
|
else :
|
||||||
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % counter), 'w')
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (counter, counter)
|
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid)
|
||||||
|
previd = pageid
|
||||||
|
|
||||||
pfile.write(svgxml)
|
pfile.write(svgxml)
|
||||||
pfile.close()
|
pfile.close()
|
||||||
|
|
||||||
counter += 1
|
counter += 1
|
||||||
|
svgindex += '</div>\n'
|
||||||
print " "
|
svgindex += '<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n'
|
||||||
|
|
||||||
# finish up the html string and output it
|
|
||||||
htmlstr += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
|
||||||
|
|
||||||
# finish up the svg index string and output it
|
|
||||||
svgindex += '</body>\n</html>\n'
|
svgindex += '</body>\n</html>\n'
|
||||||
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
|
||||||
# build the opf file
|
# build the opf file
|
||||||
opfname = os.path.join(bookDir, 'book.opf')
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
||||||
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
||||||
# adding metadata
|
# adding metadata
|
||||||
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
||||||
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
if 'GUID' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
||||||
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
if 'ASIN' in meta_array:
|
||||||
|
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
||||||
|
if 'oASIN' in meta_array:
|
||||||
|
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
||||||
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
||||||
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
||||||
opfstr += ' <dc:language>en</dc:language>\n'
|
opfstr += ' <dc:language>en</dc:language>\n'
|
||||||
@@ -483,7 +608,7 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
opfstr += ' </metadata>\n'
|
opfstr += ' </metadata>\n'
|
||||||
opfstr += '<manifest>\n'
|
opfstr += '<manifest>\n'
|
||||||
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
||||||
opfstr += ' <item id="stylesheet" href="style.css" media-type="text.css"/>\n'
|
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n'
|
||||||
# adding image files to manifest
|
# adding image files to manifest
|
||||||
filenames = os.listdir(imgDir)
|
filenames = os.listdir(imgDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
@@ -541,7 +666,7 @@ def main(argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
raw = 0
|
raw = 0
|
||||||
fixedimage = False
|
fixedimage = True
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o =="-h":
|
if o =="-h":
|
||||||
usage()
|
usage()
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
|
|
||||||
# ignobleepub.pyw, version 3.3
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ignobleepub.pyw, version 3.4
|
||||||
|
|
||||||
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
||||||
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
||||||
@@ -14,10 +16,9 @@
|
|||||||
# 3.1 - Allow Windows versions of libcrypto to be found
|
# 3.1 - Allow Windows versions of libcrypto to be found
|
||||||
# 3.2 - add support for encoding to 'utf-8' when building up list of files to cecrypt from encryption.xml
|
# 3.2 - add support for encoding to 'utf-8' when building up list of files to cecrypt from encryption.xml
|
||||||
# 3.3 - On Windows try PyCrypto first and OpenSSL next
|
# 3.3 - On Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 3.4 - Modify interace to allow use with import
|
||||||
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
@@ -170,49 +171,6 @@ class Decryptor(object):
|
|||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def cli_main(argv=sys.argv):
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
if AES is None:
|
|
||||||
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
|
||||||
"separately. Read the top-of-script comment for details." % \
|
|
||||||
(progname,)
|
|
||||||
return 1
|
|
||||||
if len(argv) != 4:
|
|
||||||
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
|
||||||
return 1
|
|
||||||
keypath, inpath, outpath = argv[1:]
|
|
||||||
with open(keypath, 'rb') as f:
|
|
||||||
keyb64 = f.read()
|
|
||||||
key = keyb64.decode('base64')[:16]
|
|
||||||
# aes = AES.new(key, AES.MODE_CBC)
|
|
||||||
aes = AES(key)
|
|
||||||
|
|
||||||
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
|
||||||
namelist = set(inf.namelist())
|
|
||||||
if 'META-INF/rights.xml' not in namelist or \
|
|
||||||
'META-INF/encryption.xml' not in namelist:
|
|
||||||
raise IGNOBLEError('%s: not an B&N ADEPT EPUB' % (inpath,))
|
|
||||||
for name in META_NAMES:
|
|
||||||
namelist.remove(name)
|
|
||||||
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
|
||||||
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
|
||||||
expr = './/%s' % (adept('encryptedKey'),)
|
|
||||||
bookkey = ''.join(rights.findtext(expr))
|
|
||||||
bookkey = aes.decrypt(bookkey.decode('base64'))
|
|
||||||
bookkey = bookkey[:-ord(bookkey[-1])]
|
|
||||||
encryption = inf.read('META-INF/encryption.xml')
|
|
||||||
decryptor = Decryptor(bookkey[-16:], encryption)
|
|
||||||
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
|
||||||
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
|
||||||
zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
|
|
||||||
outf.writestr(zi, inf.read('mimetype'))
|
|
||||||
for path in namelist:
|
|
||||||
data = inf.read(path)
|
|
||||||
outf.writestr(path, decryptor.decrypt(path, data))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
Tkinter.Frame.__init__(self, root, border=5)
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
@@ -308,6 +266,53 @@ class DecryptionDialog(Tkinter.Frame):
|
|||||||
return
|
return
|
||||||
self.status['text'] = 'File successfully decrypted'
|
self.status['text'] = 'File successfully decrypted'
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(keypath, inpath, outpath):
|
||||||
|
with open(keypath, 'rb') as f:
|
||||||
|
keyb64 = f.read()
|
||||||
|
key = keyb64.decode('base64')[:16]
|
||||||
|
# aes = AES.new(key, AES.MODE_CBC)
|
||||||
|
aes = AES(key)
|
||||||
|
|
||||||
|
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
||||||
|
namelist = set(inf.namelist())
|
||||||
|
if 'META-INF/rights.xml' not in namelist or \
|
||||||
|
'META-INF/encryption.xml' not in namelist:
|
||||||
|
raise IGNOBLEError('%s: not an B&N ADEPT EPUB' % (inpath,))
|
||||||
|
for name in META_NAMES:
|
||||||
|
namelist.remove(name)
|
||||||
|
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
||||||
|
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
||||||
|
expr = './/%s' % (adept('encryptedKey'),)
|
||||||
|
bookkey = ''.join(rights.findtext(expr))
|
||||||
|
bookkey = aes.decrypt(bookkey.decode('base64'))
|
||||||
|
bookkey = bookkey[:-ord(bookkey[-1])]
|
||||||
|
encryption = inf.read('META-INF/encryption.xml')
|
||||||
|
decryptor = Decryptor(bookkey[-16:], encryption)
|
||||||
|
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
||||||
|
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
||||||
|
zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
|
||||||
|
outf.writestr(zi, inf.read('mimetype'))
|
||||||
|
for path in namelist:
|
||||||
|
data = inf.read(path)
|
||||||
|
outf.writestr(path, decryptor.decrypt(path, data))
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
||||||
|
return 1
|
||||||
|
keypath, inpath, outpath = argv[1:]
|
||||||
|
return decryptBook(keypath, inpath, outpath)
|
||||||
|
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
if AES is None:
|
if AES is None:
|
||||||
@@ -324,6 +329,7 @@ def gui_main():
|
|||||||
root.mainloop()
|
root.mainloop()
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
if len(sys.argv) > 1:
|
if len(sys.argv) > 1:
|
||||||
sys.exit(cli_main())
|
sys.exit(cli_main())
|
||||||
@@ -1,6 +1,8 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
|
|
||||||
# ignoblekeygen.pyw, version 2.2
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ignoblekeygen.pyw, version 2.3
|
||||||
|
|
||||||
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
||||||
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
||||||
@@ -12,12 +14,12 @@
|
|||||||
# 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
|
# 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
|
||||||
# 2.1 - Allow Windows versions of libcrypto to be found
|
# 2.1 - Allow Windows versions of libcrypto to be found
|
||||||
# 2.2 - On Windows try PyCrypto first and then OpenSSL next
|
# 2.2 - On Windows try PyCrypto first and then OpenSSL next
|
||||||
|
# 2.3 - Modify interface to allow use of import
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Generate Barnes & Noble EPUB user key from name and credit card number.
|
Generate Barnes & Noble EPUB user key from name and credit card number.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
@@ -72,7 +74,7 @@ def _load_crypto_libcrypto():
|
|||||||
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
c_int])
|
c_int])
|
||||||
class AES(object):
|
class AES(object):
|
||||||
def __init__(self, userkey, iv):
|
def __init__(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
self._iv = iv
|
self._iv = iv
|
||||||
key = self._key = AES_KEY()
|
key = self._key = AES_KEY()
|
||||||
@@ -80,7 +82,7 @@ def _load_crypto_libcrypto():
|
|||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise IGNOBLEError('Failed to initialize AES Encrypt key')
|
raise IGNOBLEError('Failed to initialize AES Encrypt key')
|
||||||
|
|
||||||
def encrypt(self, data):
|
def encrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
@@ -120,6 +122,7 @@ AES = _load_crypto()
|
|||||||
def normalize_name(name):
|
def normalize_name(name):
|
||||||
return ''.join(x for x in name.lower() if x != ' ')
|
return ''.join(x for x in name.lower() if x != ' ')
|
||||||
|
|
||||||
|
|
||||||
def generate_keyfile(name, ccn, outpath):
|
def generate_keyfile(name, ccn, outpath):
|
||||||
name = normalize_name(name) + '\x00'
|
name = normalize_name(name) + '\x00'
|
||||||
ccn = ccn + '\x00'
|
ccn = ccn + '\x00'
|
||||||
@@ -133,19 +136,6 @@ def generate_keyfile(name, ccn, outpath):
|
|||||||
f.write(userkey.encode('base64'))
|
f.write(userkey.encode('base64'))
|
||||||
return userkey
|
return userkey
|
||||||
|
|
||||||
def cli_main(argv=sys.argv):
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
if AES is None:
|
|
||||||
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
|
||||||
"separately. Read the top-of-script comment for details." % \
|
|
||||||
(progname,)
|
|
||||||
return 1
|
|
||||||
if len(argv) != 4:
|
|
||||||
print "usage: %s NAME CC# OUTFILE" % (progname,)
|
|
||||||
return 1
|
|
||||||
name, ccn, outpath = argv[1:]
|
|
||||||
generate_keyfile(name, ccn, outpath)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
@@ -211,6 +201,22 @@ class DecryptionDialog(Tkinter.Frame):
|
|||||||
return
|
return
|
||||||
self.status['text'] = 'Keyfile successfully generated'
|
self.status['text'] = 'Keyfile successfully generated'
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s NAME CC# OUTFILE" % (progname,)
|
||||||
|
return 1
|
||||||
|
name, ccn, outpath = argv[1:]
|
||||||
|
generate_keyfile(name, ccn, outpath)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
if AES is None:
|
if AES is None:
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
# ineptepub.pyw, version 5.5
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ineptepub.pyw, version 5.6
|
||||||
# Copyright © 2009-2010 i♥cabbages
|
# Copyright © 2009-2010 i♥cabbages
|
||||||
|
|
||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
@@ -27,13 +29,11 @@
|
|||||||
# 5.3 - add support for OpenSSL on Windows, fix bug with some versions of libcrypto 0.9.8 prior to path level o
|
# 5.3 - add support for OpenSSL on Windows, fix bug with some versions of libcrypto 0.9.8 prior to path level o
|
||||||
# 5.4 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
|
# 5.4 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
|
||||||
# 5.5 - On Windows try PyCrypto first, OpenSSL next
|
# 5.5 - On Windows try PyCrypto first, OpenSSL next
|
||||||
|
# 5.6 - Modify interface to allow use with import
|
||||||
"""
|
"""
|
||||||
Decrypt Adobe ADEPT-encrypted EPUB books.
|
Decrypt Adobe ADEPT-encrypted EPUB books.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
@@ -312,45 +312,6 @@ class Decryptor(object):
|
|||||||
data = self.decompress(data)
|
data = self.decompress(data)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def cli_main(argv=sys.argv):
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
if AES is None:
|
|
||||||
print "%s: This script requires OpenSSL or PyCrypto, which must be" \
|
|
||||||
" installed separately. Read the top-of-script comment for" \
|
|
||||||
" details." % (progname,)
|
|
||||||
return 1
|
|
||||||
if len(argv) != 4:
|
|
||||||
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
|
||||||
return 1
|
|
||||||
keypath, inpath, outpath = argv[1:]
|
|
||||||
with open(keypath, 'rb') as f:
|
|
||||||
keyder = f.read()
|
|
||||||
rsa = RSA(keyder)
|
|
||||||
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
|
||||||
namelist = set(inf.namelist())
|
|
||||||
if 'META-INF/rights.xml' not in namelist or \
|
|
||||||
'META-INF/encryption.xml' not in namelist:
|
|
||||||
raise ADEPTError('%s: not an ADEPT EPUB' % (inpath,))
|
|
||||||
for name in META_NAMES:
|
|
||||||
namelist.remove(name)
|
|
||||||
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
|
||||||
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
|
||||||
expr = './/%s' % (adept('encryptedKey'),)
|
|
||||||
bookkey = ''.join(rights.findtext(expr))
|
|
||||||
bookkey = rsa.decrypt(bookkey.decode('base64'))
|
|
||||||
# Padded as per RSAES-PKCS1-v1_5
|
|
||||||
if bookkey[-17] != '\x00':
|
|
||||||
raise ADEPTError('problem decrypting session key')
|
|
||||||
encryption = inf.read('META-INF/encryption.xml')
|
|
||||||
decryptor = Decryptor(bookkey[-16:], encryption)
|
|
||||||
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
|
||||||
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
|
||||||
zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
|
|
||||||
outf.writestr(zi, inf.read('mimetype'))
|
|
||||||
for path in namelist:
|
|
||||||
data = inf.read(path)
|
|
||||||
outf.writestr(path, decryptor.decrypt(path, data))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
@@ -446,6 +407,52 @@ class DecryptionDialog(Tkinter.Frame):
|
|||||||
return
|
return
|
||||||
self.status['text'] = 'File successfully decrypted'
|
self.status['text'] = 'File successfully decrypted'
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(keypath, inpath, outpath):
|
||||||
|
with open(keypath, 'rb') as f:
|
||||||
|
keyder = f.read()
|
||||||
|
rsa = RSA(keyder)
|
||||||
|
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
||||||
|
namelist = set(inf.namelist())
|
||||||
|
if 'META-INF/rights.xml' not in namelist or \
|
||||||
|
'META-INF/encryption.xml' not in namelist:
|
||||||
|
raise ADEPTError('%s: not an ADEPT EPUB' % (inpath,))
|
||||||
|
for name in META_NAMES:
|
||||||
|
namelist.remove(name)
|
||||||
|
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
||||||
|
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
||||||
|
expr = './/%s' % (adept('encryptedKey'),)
|
||||||
|
bookkey = ''.join(rights.findtext(expr))
|
||||||
|
bookkey = rsa.decrypt(bookkey.decode('base64'))
|
||||||
|
# Padded as per RSAES-PKCS1-v1_5
|
||||||
|
if bookkey[-17] != '\x00':
|
||||||
|
raise ADEPTError('problem decrypting session key')
|
||||||
|
encryption = inf.read('META-INF/encryption.xml')
|
||||||
|
decryptor = Decryptor(bookkey[-16:], encryption)
|
||||||
|
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
||||||
|
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
||||||
|
zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
|
||||||
|
outf.writestr(zi, inf.read('mimetype'))
|
||||||
|
for path in namelist:
|
||||||
|
data = inf.read(path)
|
||||||
|
outf.writestr(path, decryptor.decrypt(path, data))
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be" \
|
||||||
|
" installed separately. Read the top-of-script comment for" \
|
||||||
|
" details." % (progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
||||||
|
return 1
|
||||||
|
keypath, inpath, outpath = argv[1:]
|
||||||
|
return decryptBook(keypath, inpath, outpath)
|
||||||
|
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
if AES is None:
|
if AES is None:
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
# ineptkey.pyw, version 5.3
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ineptkey.pyw, version 5.4
|
||||||
# Copyright © 2009-2010 i♥cabbages
|
# Copyright © 2009-2010 i♥cabbages
|
||||||
|
|
||||||
# Released under the terms of the GNU General Public Licence, version 3 or
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
@@ -33,13 +35,12 @@
|
|||||||
# 5.1 - add support for using OpenSSL on Windows in place of PyCrypto
|
# 5.1 - add support for using OpenSSL on Windows in place of PyCrypto
|
||||||
# 5.2 - added support for output of key to a particular file
|
# 5.2 - added support for output of key to a particular file
|
||||||
# 5.3 - On Windows try PyCrypto first, OpenSSL next
|
# 5.3 - On Windows try PyCrypto first, OpenSSL next
|
||||||
|
# 5.4 - Modify interface to allow use of import
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Retrieve Adobe ADEPT user key.
|
Retrieve Adobe ADEPT user key.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
@@ -415,22 +416,29 @@ class ExceptionDialog(Tkinter.Frame):
|
|||||||
label.pack(fill=Tkconstants.X, expand=0)
|
label.pack(fill=Tkconstants.X, expand=0)
|
||||||
self.text = Tkinter.Text(self)
|
self.text = Tkinter.Text(self)
|
||||||
self.text.pack(fill=Tkconstants.BOTH, expand=1)
|
self.text.pack(fill=Tkconstants.BOTH, expand=1)
|
||||||
|
|
||||||
self.text.insert(Tkconstants.END, text)
|
self.text.insert(Tkconstants.END, text)
|
||||||
|
|
||||||
def cli_main(argv=sys.argv):
|
|
||||||
keypath = argv[1]
|
def extractKeyfile(keypath):
|
||||||
try:
|
try:
|
||||||
success = retrieve_key(keypath)
|
success = retrieve_key(keypath)
|
||||||
except ADEPTError, e:
|
except ADEPTError, e:
|
||||||
print "Key generation Error: " + str(e)
|
print "Key generation Error: " + str(e)
|
||||||
return 1
|
return 1
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print "General Error: " + str(e)
|
print "General Error: " + str(e)
|
||||||
return 1
|
return 1
|
||||||
if not success:
|
if not success:
|
||||||
return 1
|
return 1
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
keypath = argv[1]
|
||||||
|
return extractKeyfile(keypath)
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
root.withdraw()
|
root.withdraw()
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
#! /usr/bin/env python
|
#! /usr/bin/env python
|
||||||
# ineptpdf.pyw, version 7.7
|
# ineptpdf.pyw, version 7.9
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
# To run this program install Python 2.6 from http://www.python.org/download/
|
# To run this program install Python 2.6 from http://www.python.org/download/
|
||||||
# and OpenSSL (already installed on Mac OS X and Linux) OR
|
# and OpenSSL (already installed on Mac OS X and Linux) OR
|
||||||
@@ -30,13 +32,13 @@
|
|||||||
# fixed minor typos
|
# fixed minor typos
|
||||||
# 7.6 - backported AES and other fixes from version 8.4.48
|
# 7.6 - backported AES and other fixes from version 8.4.48
|
||||||
# 7.7 - On Windows try PyCrypto first and OpenSSL next
|
# 7.7 - On Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 7.8 - Modify interface to allow use of import
|
||||||
|
# 7.9 - Bug fix for some session key errors when len(bookkey) > length required
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypts Adobe ADEPT-encrypted PDF files.
|
Decrypts Adobe ADEPT-encrypted PDF files.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import with_statement
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
@@ -155,6 +157,7 @@ def _load_crypto_libcrypto():
|
|||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
|
MODE_CBC = 0
|
||||||
@classmethod
|
@classmethod
|
||||||
def new(cls, userkey, mode, iv):
|
def new(cls, userkey, mode, iv):
|
||||||
self = AES()
|
self = AES()
|
||||||
@@ -1026,25 +1029,25 @@ def stream_value(x):
|
|||||||
|
|
||||||
# ascii85decode(data)
|
# ascii85decode(data)
|
||||||
def ascii85decode(data):
|
def ascii85decode(data):
|
||||||
n = b = 0
|
n = b = 0
|
||||||
out = ''
|
out = ''
|
||||||
for c in data:
|
for c in data:
|
||||||
if '!' <= c and c <= 'u':
|
if '!' <= c and c <= 'u':
|
||||||
n += 1
|
n += 1
|
||||||
b = b*85+(ord(c)-33)
|
b = b*85+(ord(c)-33)
|
||||||
if n == 5:
|
if n == 5:
|
||||||
out += struct.pack('>L',b)
|
out += struct.pack('>L',b)
|
||||||
n = b = 0
|
n = b = 0
|
||||||
elif c == 'z':
|
elif c == 'z':
|
||||||
assert n == 0
|
assert n == 0
|
||||||
out += '\0\0\0\0'
|
out += '\0\0\0\0'
|
||||||
elif c == '~':
|
elif c == '~':
|
||||||
if n:
|
if n:
|
||||||
for _ in range(5-n):
|
for _ in range(5-n):
|
||||||
b = b*85+84
|
b = b*85+84
|
||||||
out += struct.pack('>L',b)[:n-1]
|
out += struct.pack('>L',b)[:n-1]
|
||||||
break
|
break
|
||||||
return out
|
return out
|
||||||
|
|
||||||
|
|
||||||
## PDFStream type
|
## PDFStream type
|
||||||
@@ -1530,16 +1533,30 @@ class PDFDocument(object):
|
|||||||
bookkey = bookkey[index:]
|
bookkey = bookkey[index:]
|
||||||
ebx_V = int_value(param.get('V', 4))
|
ebx_V = int_value(param.get('V', 4))
|
||||||
ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
|
ebx_type = int_value(param.get('EBX_ENCRYPTIONTYPE', 6))
|
||||||
# added because of the booktype / decryption book session key error
|
# added because of improper booktype / decryption book session key errors
|
||||||
if ebx_V == 3:
|
if length > 0:
|
||||||
V = 3
|
if len(bookkey) == length:
|
||||||
elif ebx_V < 4 or ebx_type < 6:
|
if ebx_V == 3:
|
||||||
V = ord(bookkey[0])
|
V = 3
|
||||||
bookkey = bookkey[1:]
|
else:
|
||||||
|
V = 2
|
||||||
|
elif len(bookkey) == length + 1:
|
||||||
|
V = ord(bookkey[0])
|
||||||
|
bookkey = bookkey[1:]
|
||||||
|
else:
|
||||||
|
print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
|
||||||
|
print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
|
||||||
|
print "bookkey[0] is %d" % ord(bookkey[0])
|
||||||
|
raise ADEPTError('error decrypting book session key - mismatched length')
|
||||||
else:
|
else:
|
||||||
V = 2
|
# proper length unknown try with whatever you have
|
||||||
if length and len(bookkey) != length:
|
print "ebx_V is %d and ebx_type is %d" % (ebx_V, ebx_type)
|
||||||
raise ADEPTError('error decrypting book session key')
|
print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
|
||||||
|
print "bookkey[0] is %d" % ord(bookkey[0])
|
||||||
|
if ebx_V == 3:
|
||||||
|
V = 3
|
||||||
|
else:
|
||||||
|
V = 2
|
||||||
self.decrypt_key = bookkey
|
self.decrypt_key = bookkey
|
||||||
self.genkey = self.genkey_v3 if V == 3 else self.genkey_v2
|
self.genkey = self.genkey_v3 if V == 3 else self.genkey_v2
|
||||||
self.decipher = self.decrypt_rc4
|
self.decipher = self.decrypt_rc4
|
||||||
@@ -2056,7 +2073,7 @@ class PDFSerializer(object):
|
|||||||
### are no longer useful, as we have extracted all objects from
|
### are no longer useful, as we have extracted all objects from
|
||||||
### them. Therefore leave them out from the output.
|
### them. Therefore leave them out from the output.
|
||||||
if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm:
|
if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm:
|
||||||
self.write('(deleted)')
|
self.write('(deleted)')
|
||||||
else:
|
else:
|
||||||
data = obj.get_decdata()
|
data = obj.get_decdata()
|
||||||
self.serialize_object(obj.dic)
|
self.serialize_object(obj.dic)
|
||||||
@@ -2076,25 +2093,6 @@ class PDFSerializer(object):
|
|||||||
self.write('\n')
|
self.write('\n')
|
||||||
self.write('endobj\n')
|
self.write('endobj\n')
|
||||||
|
|
||||||
def cli_main(argv=sys.argv):
|
|
||||||
progname = os.path.basename(argv[0])
|
|
||||||
if RSA is None:
|
|
||||||
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
|
||||||
"separately. Read the top-of-script comment for details." % \
|
|
||||||
(progname,)
|
|
||||||
return 1
|
|
||||||
if len(argv) != 4:
|
|
||||||
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
|
||||||
return 1
|
|
||||||
keypath, inpath, outpath = argv[1:]
|
|
||||||
with open(inpath, 'rb') as inf:
|
|
||||||
serializer = PDFSerializer(inf, keypath)
|
|
||||||
# hope this will fix the 'bad file descriptor' problem
|
|
||||||
with open(outpath, 'wb') as outf:
|
|
||||||
# help construct to make sure the method runs to the end
|
|
||||||
serializer.dump(outf)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
@@ -2198,6 +2196,31 @@ class DecryptionDialog(Tkinter.Frame):
|
|||||||
'Close this window or decrypt another pdf file.'
|
'Close this window or decrypt another pdf file.'
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(keypath, inpath, outpath):
|
||||||
|
with open(inpath, 'rb') as inf:
|
||||||
|
serializer = PDFSerializer(inf, keypath)
|
||||||
|
# hope this will fix the 'bad file descriptor' problem
|
||||||
|
with open(outpath, 'wb') as outf:
|
||||||
|
# help construct to make sure the method runs to the end
|
||||||
|
serializer.dump(outf)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if RSA is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
||||||
|
return 1
|
||||||
|
keypath, inpath, outpath = argv[1:]
|
||||||
|
return decryptBook(keypath, inpath, outpath)
|
||||||
|
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
if RSA is None:
|
if RSA is None:
|
||||||
@@ -265,8 +265,8 @@ def findNameForHash(hash):
|
|||||||
result = ""
|
result = ""
|
||||||
for name in names:
|
for name in names:
|
||||||
if hash == encodeHash(name, charMap2):
|
if hash == encodeHash(name, charMap2):
|
||||||
result = name
|
result = name
|
||||||
break
|
break
|
||||||
return result
|
return result
|
||||||
|
|
||||||
# Print all the records from the kindle.info file (option -i)
|
# Print all the records from the kindle.info file (option -i)
|
||||||
@@ -293,9 +293,9 @@ def getTwoBitsFromBitField(bitField,offset):
|
|||||||
|
|
||||||
# Returns the six bits at offset from a bit field
|
# Returns the six bits at offset from a bit field
|
||||||
def getSixBitsFromBitField(bitField,offset):
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
offset *= 3
|
offset *= 3
|
||||||
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
return value
|
return value
|
||||||
|
|
||||||
# 8 bits to six bits encoding from hash to generate PID string
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
def encodePID(hash):
|
def encodePID(hash):
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
||||||
# for personal use for archiving and converting your ebooks
|
# for personal use for archiving and converting your ebooks
|
||||||
|
|
||||||
@@ -14,21 +16,8 @@
|
|||||||
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
||||||
# and many many others
|
# and many many others
|
||||||
|
|
||||||
# It can run standalone to convert K4M/K4PC/Mobi files, or it can be installed as a
|
|
||||||
# plugin for Calibre (http://calibre-ebook.com/about) so that importing
|
|
||||||
# K4 or Mobi with DRM is no londer a multi-step process.
|
|
||||||
#
|
|
||||||
# ***NOTE*** If you are using this script as a calibre plugin for a K4M or K4PC ebook
|
|
||||||
# then calibre must be installed on the same machine and in the same account as K4PC or K4M
|
|
||||||
# for the plugin version to function properly.
|
|
||||||
#
|
|
||||||
# To create a Calibre plugin, rename this file so that the filename
|
|
||||||
# ends in '_plugin.py', put it into a ZIP file with all its supporting python routines
|
|
||||||
# and import that ZIP into Calibre using its plugin configuration GUI.
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
__version__ = '4.0'
|
||||||
|
|
||||||
__version__ = '1.4'
|
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -41,11 +30,9 @@ class Unbuffered:
|
|||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import binascii
|
import string
|
||||||
import zlib
|
|
||||||
import re
|
import re
|
||||||
import zlib, zipfile, tempfile, shutil
|
import traceback
|
||||||
from struct import pack, unpack, unpack_from
|
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -55,19 +42,112 @@ if 'calibre' in sys.modules:
|
|||||||
else:
|
else:
|
||||||
inCalibre = False
|
inCalibre = False
|
||||||
|
|
||||||
def zipUpDir(myzip, tempdir,localname):
|
if inCalibre:
|
||||||
currentdir = tempdir
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
if localname != "":
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
currentdir = os.path.join(currentdir,localname)
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
list = os.listdir(currentdir)
|
else:
|
||||||
for file in list:
|
import mobidedrm
|
||||||
afilename = file
|
import topazextract
|
||||||
localfilePath = os.path.join(localname, afilename)
|
import kgenpids
|
||||||
realfilePath = os.path.join(currentdir,file)
|
|
||||||
if os.path.isfile(realfilePath):
|
|
||||||
myzip.write(realfilePath, localfilePath)
|
# cleanup bytestring filenames
|
||||||
elif os.path.isdir(realfilePath):
|
# borrowed from calibre from calibre/src/calibre/__init__.py
|
||||||
zipUpDir(myzip, tempdir, localfilePath)
|
# added in removal of non-printing chars
|
||||||
|
# and removal of . at start
|
||||||
|
# convert spaces to underscores
|
||||||
|
def cleanup_name(name):
|
||||||
|
_filename_sanitize = re.compile(r'[\xae\0\\|\?\*<":>\+/]')
|
||||||
|
substitute='_'
|
||||||
|
one = ''.join(char for char in name if char in string.printable)
|
||||||
|
one = _filename_sanitize.sub(substitute, one)
|
||||||
|
one = re.sub(r'\s', ' ', one).strip()
|
||||||
|
one = re.sub(r'^\.+$', '_', one)
|
||||||
|
one = one.replace('..', substitute)
|
||||||
|
# Windows doesn't like path components that end with a period
|
||||||
|
if one.endswith('.'):
|
||||||
|
one = one[:-1]+substitute
|
||||||
|
# Mac and Unix don't like file names that begin with a full stop
|
||||||
|
if len(one) > 0 and one[0] == '.':
|
||||||
|
one = substitute+one[1:]
|
||||||
|
one = one.replace(' ','_')
|
||||||
|
return one
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
# handle the obvious cases at the beginning
|
||||||
|
if not os.path.isfile(infile):
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
mobi = True
|
||||||
|
magic3 = file(infile,'rb').read(3)
|
||||||
|
if magic3 == 'TPZ':
|
||||||
|
mobi = False
|
||||||
|
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
mb = mobidedrm.MobiBook(infile)
|
||||||
|
else:
|
||||||
|
mb = topazextract.TopazBook(infile)
|
||||||
|
|
||||||
|
title = mb.getBookTitle()
|
||||||
|
print "Processing Book: ", title
|
||||||
|
filenametitle = cleanup_name(title)
|
||||||
|
outfilename = bookname
|
||||||
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
|
# build pid list
|
||||||
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
|
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
|
except mobidedrm.DrmException, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except topazextract.TpzDRMError, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except Exception, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
else:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
|
mb.getMobiFile(outfile)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# topaz:
|
||||||
|
print " Creating NoDRM HTMLZ Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
|
print " Creating SVG ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
|
mb.getXMLZip(zipname)
|
||||||
|
|
||||||
|
# remove internal temporary directory of Topaz pieces
|
||||||
|
mb.cleanup()
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def usage(progname):
|
def usage(progname):
|
||||||
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
||||||
@@ -78,9 +158,6 @@ def usage(progname):
|
|||||||
# Main
|
# Main
|
||||||
#
|
#
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
import mobidedrm
|
|
||||||
import topazextract
|
|
||||||
import kgenpids
|
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
k4 = False
|
k4 = False
|
||||||
@@ -89,9 +166,8 @@ def main(argv=sys.argv):
|
|||||||
pids = []
|
pids = []
|
||||||
|
|
||||||
print ('K4MobiDeDrm v%(__version__)s '
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
print ' '
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -118,217 +194,14 @@ def main(argv=sys.argv):
|
|||||||
|
|
||||||
# try with built in Kindle Info files
|
# try with built in Kindle Info files
|
||||||
k4 = True
|
k4 = True
|
||||||
|
if sys.platform.startswith('linux'):
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = None
|
||||||
infile = args[0]
|
infile = args[0]
|
||||||
outdir = args[1]
|
outdir = args[1]
|
||||||
|
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
|
||||||
|
|
||||||
# handle the obvious cases at the beginning
|
|
||||||
if not os.path.isfile(infile):
|
|
||||||
print "Error: Input file does not exist"
|
|
||||||
return 1
|
|
||||||
|
|
||||||
mobi = True
|
|
||||||
magic3 = file(infile,'rb').read(3)
|
|
||||||
if magic3 == 'TPZ':
|
|
||||||
mobi = False
|
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
mb = mobidedrm.MobiBook(infile)
|
|
||||||
else:
|
|
||||||
tempdir = tempfile.mkdtemp()
|
|
||||||
mb = topazextract.TopazBook(infile, tempdir)
|
|
||||||
|
|
||||||
title = mb.getBookTitle()
|
|
||||||
print "Processing Book: ", title
|
|
||||||
|
|
||||||
# build pid list
|
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
|
||||||
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if mobi:
|
|
||||||
unlocked_file = mb.processBook(pidlst)
|
|
||||||
else:
|
|
||||||
mb.processBook(pidlst)
|
|
||||||
|
|
||||||
except mobidedrm.DrmException, e:
|
|
||||||
print " ... not suceessful " + str(e) + "\n"
|
|
||||||
return 1
|
|
||||||
except topazextract.TpzDRMError, e:
|
|
||||||
print str(e)
|
|
||||||
print " Creating DeBug Full Zip Archive of Book"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_debug' + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
zipUpDir(myzip, tempdir, '')
|
|
||||||
myzip.close()
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 1
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
outfile = os.path.join(outdir,bookname + '_nodrm' + '.azw')
|
|
||||||
file(outfile, 'wb').write(unlocked_file)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
# topaz: build up zip archives of results
|
|
||||||
print " Creating HTML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_nodrm' + '.zip')
|
|
||||||
myzip1 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.html'),'book.html')
|
|
||||||
myzip1.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip1.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip1.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip1, tempdir, 'img')
|
|
||||||
myzip1.close()
|
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
|
||||||
myzip2 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip2.write(os.path.join(tempdir,'index_svg.xhtml'),'index_svg.xhtml')
|
|
||||||
zipUpDir(myzip2, tempdir, 'svg')
|
|
||||||
zipUpDir(myzip2, tempdir, 'img')
|
|
||||||
myzip2.close()
|
|
||||||
|
|
||||||
print " Creating XML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
|
||||||
myzip3 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
targetdir = os.path.join(tempdir,'xml')
|
|
||||||
zipUpDir(myzip3, targetdir, '')
|
|
||||||
zipUpDir(myzip3, tempdir, 'img')
|
|
||||||
myzip3.close()
|
|
||||||
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
if not __name__ == "__main__" and inCalibre:
|
|
||||||
from calibre.customize import FileTypePlugin
|
|
||||||
|
|
||||||
class K4DeDRM(FileTypePlugin):
|
|
||||||
name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
|
|
||||||
description = 'Removes DRM from K4PC and Mac, Kindle Mobi and Topaz files. \
|
|
||||||
Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
|
|
||||||
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
|
|
||||||
author = 'DiapDealer, SomeUpdates' # The author of this plugin
|
|
||||||
version = (0, 1, 7) # The version number of this plugin
|
|
||||||
file_types = set(['prc','mobi','azw','azw1','tpz']) # The file types that this plugin will be applied to
|
|
||||||
on_import = True # Run this plugin during the import
|
|
||||||
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
|
|
||||||
|
|
||||||
def run(self, path_to_ebook):
|
|
||||||
from calibre.gui2 import is_ok_to_use_qt
|
|
||||||
from PyQt4.Qt import QMessageBox
|
|
||||||
from calibre.ptempfile import PersistentTemporaryDirectory
|
|
||||||
|
|
||||||
import kgenpids
|
|
||||||
import zlib
|
|
||||||
import zipfile
|
|
||||||
import topazextract
|
|
||||||
import mobidedrm
|
|
||||||
|
|
||||||
k4 = True
|
|
||||||
pids = []
|
|
||||||
serials = []
|
|
||||||
kInfoFiles = []
|
|
||||||
|
|
||||||
# Get supplied list of PIDs to try from plugin customization.
|
|
||||||
customvalues = self.site_customization.split(',')
|
|
||||||
for customvalue in customvalues:
|
|
||||||
customvalue = str(customvalue)
|
|
||||||
customvalue = customvalue.strip()
|
|
||||||
if len(customvalue) == 10 or len(customvalue) == 8:
|
|
||||||
pids.append(customvalue)
|
|
||||||
else :
|
|
||||||
if len(customvalue) == 16 and customvalue[0] == 'B':
|
|
||||||
serials.append(customvalue)
|
|
||||||
else:
|
|
||||||
print "%s is not a valid Kindle serial number or PID." % str(customvalue)
|
|
||||||
|
|
||||||
# Load any kindle info files (*.info) included Calibre's config directory.
|
|
||||||
try:
|
|
||||||
# Find Calibre's configuration directory.
|
|
||||||
confpath = os.path.split(os.path.split(self.plugin_path)[0])[0]
|
|
||||||
print 'K4MobiDeDRM: Calibre configuration directory = %s' % confpath
|
|
||||||
files = os.listdir(confpath)
|
|
||||||
filefilter = re.compile("\.info$", re.IGNORECASE)
|
|
||||||
files = filter(filefilter.search, files)
|
|
||||||
|
|
||||||
if files:
|
|
||||||
for filename in files:
|
|
||||||
fpath = os.path.join(confpath, filename)
|
|
||||||
kInfoFiles.append(fpath)
|
|
||||||
print 'K4MobiDeDRM: Kindle info file %s found in config folder.' % filename
|
|
||||||
except IOError:
|
|
||||||
print 'K4MobiDeDRM: Error reading kindle info files from config directory.'
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
mobi = True
|
|
||||||
magic3 = file(path_to_ebook,'rb').read(3)
|
|
||||||
if magic3 == 'TPZ':
|
|
||||||
mobi = False
|
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(path_to_ebook))[0]
|
|
||||||
|
|
||||||
if mobi:
|
|
||||||
mb = mobidedrm.MobiBook(path_to_ebook)
|
|
||||||
else:
|
|
||||||
tempdir = PersistentTemporaryDirectory()
|
|
||||||
mb = topazextract.TopazBook(path_to_ebook, tempdir)
|
|
||||||
|
|
||||||
title = mb.getBookTitle()
|
|
||||||
md1, md2 = mb.getPIDMetaInfo()
|
|
||||||
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if mobi:
|
|
||||||
unlocked_file = mb.processBook(pidlst)
|
|
||||||
else:
|
|
||||||
mb.processBook(pidlst)
|
|
||||||
|
|
||||||
except mobidedrm.DrmException:
|
|
||||||
#if you reached here then no luck raise and exception
|
|
||||||
if is_ok_to_use_qt():
|
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
|
||||||
d.show()
|
|
||||||
d.raise_()
|
|
||||||
d.exec_()
|
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
|
||||||
return ""
|
|
||||||
except topazextract.TpzDRMError:
|
|
||||||
#if you reached here then no luck raise and exception
|
|
||||||
if is_ok_to_use_qt():
|
|
||||||
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM Plugin", "Error decoding: %s\n" % path_to_ebook)
|
|
||||||
d.show()
|
|
||||||
d.raise_()
|
|
||||||
d.exec_()
|
|
||||||
raise Exception("K4MobiDeDRM plugin could not decode the file")
|
|
||||||
return ""
|
|
||||||
|
|
||||||
print "Success!"
|
|
||||||
if mobi:
|
|
||||||
of = self.temporary_file(bookname+'.mobi')
|
|
||||||
of.write(unlocked_file)
|
|
||||||
of.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
# topaz: build up zip archives of results
|
|
||||||
print " Creating HTML ZIP Archive"
|
|
||||||
of = self.temporary_file(bookname + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(of.name,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip.write(os.path.join(tempdir,'book.html'),'book.html')
|
|
||||||
myzip.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip, tempdir, 'img')
|
|
||||||
myzip.close()
|
|
||||||
return of.name
|
|
||||||
|
|
||||||
def customization_help(self, gui=False):
|
|
||||||
return 'Enter 10 character PIDs and/or Kindle serial numbers, separated by commas.'
|
|
||||||
|
|||||||
@@ -1,10 +1,14 @@
|
|||||||
# standlone set of Mac OSX specific routines needed for K4DeDRM
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
import subprocess
|
import subprocess
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -21,6 +25,25 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('libcrypto not found')
|
raise DrmException('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
# From OpenSSL's crypto aes header
|
||||||
|
#
|
||||||
|
# AES_ENCRYPT 1
|
||||||
|
# AES_DECRYPT 0
|
||||||
|
# AES_MAXNR 14 (in bytes)
|
||||||
|
# AES_BLOCK_SIZE 16 (in bytes)
|
||||||
|
#
|
||||||
|
# struct aes_key_st {
|
||||||
|
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
|
||||||
|
# int rounds;
|
||||||
|
# };
|
||||||
|
# typedef struct aes_key_st AES_KEY;
|
||||||
|
#
|
||||||
|
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
|
||||||
|
#
|
||||||
|
# note: the ivec string, and output buffer are mutable
|
||||||
|
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
|
||||||
|
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
|
||||||
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
c_char_pp = POINTER(c_char_p)
|
c_char_pp = POINTER(c_char_p)
|
||||||
c_int_p = POINTER(c_int)
|
c_int_p = POINTER(c_int)
|
||||||
@@ -39,6 +62,12 @@ def _load_crypto_libcrypto():
|
|||||||
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
|
||||||
|
# From OpenSSL's Crypto evp/p5_crpt2.c
|
||||||
|
#
|
||||||
|
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
|
||||||
|
# const unsigned char *salt, int saltlen, int iter,
|
||||||
|
# int keylen, unsigned char *out);
|
||||||
|
|
||||||
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
|
||||||
@@ -46,7 +75,7 @@ def _load_crypto_libcrypto():
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._blocksize = 0
|
self._blocksize = 0
|
||||||
self._keyctx = None
|
self._keyctx = None
|
||||||
self.iv = 0
|
self._iv = 0
|
||||||
|
|
||||||
def set_decrypt_key(self, userkey, iv):
|
def set_decrypt_key(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
@@ -54,24 +83,24 @@ def _load_crypto_libcrypto():
|
|||||||
raise DrmException('AES improper key used')
|
raise DrmException('AES improper key used')
|
||||||
return
|
return
|
||||||
keyctx = self._keyctx = AES_KEY()
|
keyctx = self._keyctx = AES_KEY()
|
||||||
self.iv = iv
|
self._iv = iv
|
||||||
|
self._userkey = userkey
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise DrmException('Failed to initialize AES key')
|
raise DrmException('Failed to initialize AES key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0)
|
mutable_iv = create_string_buffer(self._iv, len(self._iv))
|
||||||
|
keyctx = self._keyctx
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise DrmException('AES decryption failed')
|
raise DrmException('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
def keyivgen(self, passwd):
|
def keyivgen(self, passwd, salt, iter, keylen):
|
||||||
salt = '16743'
|
saltlen = len(salt)
|
||||||
saltlen = 5
|
|
||||||
passlen = len(passwd)
|
passlen = len(passwd)
|
||||||
iter = 0x3e8
|
|
||||||
keylen = 80
|
|
||||||
out = create_string_buffer(keylen)
|
out = create_string_buffer(keylen)
|
||||||
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
|
||||||
return out.raw
|
return out.raw
|
||||||
@@ -91,13 +120,83 @@ LibCrypto = _load_crypto()
|
|||||||
# Utility Routines
|
# Utility Routines
|
||||||
#
|
#
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
|
||||||
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
|
||||||
|
|
||||||
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
# For K4M 1.6.X and later
|
||||||
|
# generate table of prime number less than or equal to int n
|
||||||
|
def primes(n):
|
||||||
|
if n==2: return [2]
|
||||||
|
elif n<2: return []
|
||||||
|
s=range(3,n+1,2)
|
||||||
|
mroot = n ** 0.5
|
||||||
|
half=(n+1)/2-1
|
||||||
|
i=0
|
||||||
|
m=3
|
||||||
|
while m <= mroot:
|
||||||
|
if s[i]:
|
||||||
|
j=(m*m-3)/2
|
||||||
|
s[j]=0
|
||||||
|
while j<half:
|
||||||
|
s[j]=0
|
||||||
|
j+=m
|
||||||
|
i=i+1
|
||||||
|
m=2*i+3
|
||||||
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
|
||||||
@@ -129,38 +228,234 @@ def GetVolumeSerialNumber():
|
|||||||
foundIt = True
|
foundIt = True
|
||||||
break
|
break
|
||||||
if not foundIt:
|
if not foundIt:
|
||||||
sernum = '9999999999'
|
sernum = ''
|
||||||
return sernum
|
return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
dpath = home + '/Library/Application Support/Kindle'
|
||||||
|
cmdline = '/sbin/mount'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
disk = ''
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.startswith('/dev'):
|
||||||
|
(devpart, mpath) = resline.split(' on ')
|
||||||
|
dpart = devpart[5:]
|
||||||
|
pp = mpath.find('(')
|
||||||
|
if pp >= 0:
|
||||||
|
mpath = mpath[:pp-1]
|
||||||
|
if dpath.startswith(mpath):
|
||||||
|
disk = dpart
|
||||||
|
return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
|
||||||
|
def GetDiskPartitionUUID(diskpart):
|
||||||
|
uuidnum = os.getenv('MYUUIDNUMBER')
|
||||||
|
if uuidnum != None:
|
||||||
|
return uuidnum
|
||||||
|
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
bsdname = None
|
||||||
|
uuidnum = None
|
||||||
|
foundIt = False
|
||||||
|
nest = 0
|
||||||
|
uuidnest = -1
|
||||||
|
partnest = -2
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
if resline.find('{') >= 0:
|
||||||
|
nest += 1
|
||||||
|
if resline.find('}') >= 0:
|
||||||
|
nest -= 1
|
||||||
|
pp = resline.find('"UUID" = "')
|
||||||
|
if pp >= 0:
|
||||||
|
uuidnum = resline[pp+10:-1]
|
||||||
|
uuidnum = uuidnum.strip()
|
||||||
|
uuidnest = nest
|
||||||
|
if partnest == uuidnest and uuidnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
bb = resline.find('"BSD Name" = "')
|
||||||
|
if bb >= 0:
|
||||||
|
bsdname = resline[bb+14:-1]
|
||||||
|
bsdname = bsdname.strip()
|
||||||
|
if (bsdname == diskpart):
|
||||||
|
partnest = nest
|
||||||
|
else :
|
||||||
|
partnest = -2
|
||||||
|
if partnest == uuidnest and partnest > 0:
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if nest == 0:
|
||||||
|
partnest = -2
|
||||||
|
uuidnest = -1
|
||||||
|
uuidnum = None
|
||||||
|
bsdname = None
|
||||||
|
if not foundIt:
|
||||||
|
uuidnum = ''
|
||||||
|
return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
|
||||||
|
macnum = os.getenv('MYMACNUM')
|
||||||
|
if macnum != None:
|
||||||
|
return macnum
|
||||||
|
cmdline = '/sbin/ifconfig en0'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
cnt = len(reslst)
|
||||||
|
macnum = None
|
||||||
|
foundIt = False
|
||||||
|
for j in xrange(cnt):
|
||||||
|
resline = reslst[j]
|
||||||
|
pp = resline.find('ether ')
|
||||||
|
if pp >= 0:
|
||||||
|
macnum = resline[pp+6:-1]
|
||||||
|
macnum = macnum.strip()
|
||||||
|
# print "original mac", macnum
|
||||||
|
# now munge it up the way Kindle app does
|
||||||
|
# by xoring it with 0xa5 and swapping elements 3 and 4
|
||||||
|
maclst = macnum.split(':')
|
||||||
|
n = len(maclst)
|
||||||
|
if n != 6:
|
||||||
|
fountIt = False
|
||||||
|
break
|
||||||
|
for i in range(6):
|
||||||
|
maclst[i] = int('0x' + maclst[i], 0)
|
||||||
|
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
|
||||||
|
mlst[5] = maclst[5] ^ 0xa5
|
||||||
|
mlst[4] = maclst[3] ^ 0xa5
|
||||||
|
mlst[3] = maclst[4] ^ 0xa5
|
||||||
|
mlst[2] = maclst[2] ^ 0xa5
|
||||||
|
mlst[1] = maclst[1] ^ 0xa5
|
||||||
|
mlst[0] = maclst[0] ^ 0xa5
|
||||||
|
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
|
||||||
|
foundIt = True
|
||||||
|
break
|
||||||
|
if not foundIt:
|
||||||
|
macnum = ''
|
||||||
|
return macnum
|
||||||
|
|
||||||
|
|
||||||
# uses unix env to get username instead of using sysctlbyname
|
# uses unix env to get username instead of using sysctlbyname
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
username = os.getenv('USER')
|
username = os.getenv('USER')
|
||||||
return username
|
return username
|
||||||
|
|
||||||
|
def isNewInstall():
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
# soccer game fan anyone
|
||||||
|
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
|
||||||
|
# print dpath, os.path.exists(dpath)
|
||||||
|
if os.path.exists(dpath):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
def encode(data, map):
|
|
||||||
result = ""
|
|
||||||
for char in data:
|
|
||||||
value = ord(char)
|
|
||||||
Q = (value ^ 0x80) // len(map)
|
|
||||||
R = value % len(map)
|
|
||||||
result += map[Q]
|
|
||||||
result += map[R]
|
|
||||||
return result
|
|
||||||
|
|
||||||
import hashlib
|
def GetIDString():
|
||||||
|
# K4Mac now has an extensive set of ids strings it uses
|
||||||
|
# in encoding pids and in creating unique passwords
|
||||||
|
# for use in its own version of CryptUnprotectDataV2
|
||||||
|
|
||||||
|
# BUT Amazon has now become nasty enough to detect when its app
|
||||||
|
# is being run under a debugger and actually changes code paths
|
||||||
|
# including which one of these strings is chosen, all to try
|
||||||
|
# to prevent reverse engineering
|
||||||
|
|
||||||
|
# Sad really ... they will only hurt their own sales ...
|
||||||
|
# true book lovers really want to keep their books forever
|
||||||
|
# and move them to their devices and DRM prevents that so they
|
||||||
|
# will just buy from someplace else that they can remove
|
||||||
|
# the DRM from
|
||||||
|
|
||||||
|
# Amazon should know by now that true book lover's are not like
|
||||||
|
# penniless kids that pirate music, we do not pirate books
|
||||||
|
|
||||||
|
if isNewInstall():
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if len(sernum) > 7:
|
||||||
|
return sernum
|
||||||
|
diskpart = GetUserHomeAppSupKindleDirParitionName()
|
||||||
|
uuidnum = GetDiskPartitionUUID(diskpart)
|
||||||
|
if len(uuidnum) > 7:
|
||||||
|
return uuidnum
|
||||||
|
mungedmac = GetMACAddressMunged()
|
||||||
|
if len(mungedmac) > 7:
|
||||||
|
return mungedmac
|
||||||
|
return '9999999999'
|
||||||
|
|
||||||
def SHA256(message):
|
|
||||||
ctx = hashlib.sha256()
|
|
||||||
ctx.update(message)
|
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
def CryptUnprotectData(encryptedData):
|
# used by Kindle for Mac versions < 1.6.0
|
||||||
sp = GetVolumeSerialNumber() + '!@#' + GetUserName()
|
class CryptUnprotectData(object):
|
||||||
passwdData = encode(SHA256(sp),charMap1)
|
def __init__(self):
|
||||||
|
sernum = GetVolumeSerialNumber()
|
||||||
|
if sernum == '':
|
||||||
|
sernum = '9999999999'
|
||||||
|
sp = sernum + '!@#' + GetUserName()
|
||||||
|
passwdData = encode(SHA256(sp),charMap1)
|
||||||
|
salt = '16743'
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x3e8
|
||||||
|
keylen = 0x80
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext,charMap1)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
|
# used for Kindle for Mac Versions >= 1.6.0
|
||||||
|
class CryptUnprotectDataV2(object):
|
||||||
|
def __init__(self):
|
||||||
|
sp = GetUserName() + ':&%:' + GetIDString()
|
||||||
|
passwdData = encode(SHA256(sp),charMap5)
|
||||||
|
# salt generation as per the code
|
||||||
|
salt = 0x0512981d * 2 * 1 * 1
|
||||||
|
salt = str(salt) + GetUserName()
|
||||||
|
salt = encode(salt,charMap5)
|
||||||
|
self.crp = LibCrypto()
|
||||||
|
iter = 0x800
|
||||||
|
keylen = 0x400
|
||||||
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
|
self.key = key_iv[0:32]
|
||||||
|
self.iv = key_iv[32:48]
|
||||||
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
|
|
||||||
|
def decrypt(self, encryptedData):
|
||||||
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
|
cleartext = decode(cleartext, charMap5)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
|
||||||
|
# used in Kindle for Mac Version >= 1.9.0
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
crp = LibCrypto()
|
crp = LibCrypto()
|
||||||
key_iv = crp.keyivgen(passwdData)
|
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
key = key_iv[0:32]
|
key = key_iv[0:32]
|
||||||
iv = key_iv[32:48]
|
iv = key_iv[32:48]
|
||||||
crp.set_decrypt_key(key,iv)
|
crp.set_decrypt_key(key,iv)
|
||||||
@@ -168,27 +463,264 @@ def CryptUnprotectData(encryptedData):
|
|||||||
return cleartext
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
# Locate and open the .kindle-info file
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
|
||||||
def openKindleInfo(kInfoFile=None):
|
# used for Kindle for Mac Versions >= 1.9.0
|
||||||
if kInfoFile == None:
|
class CryptUnprotectDataV3(object):
|
||||||
home = os.getenv('HOME')
|
def __init__(self, entropy):
|
||||||
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
|
sp = GetUserName() + '+@#$%+' + GetIDString()
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
passwdData = encode(SHA256(sp),charMap2)
|
||||||
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
salt = entropy
|
||||||
out1, out2 = p1.communicate()
|
self.crp = LibCrypto()
|
||||||
reslst = out1.split('\n')
|
iter = 0x800
|
||||||
kinfopath = 'NONE'
|
keylen = 0x400
|
||||||
cnt = len(reslst)
|
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
|
||||||
for j in xrange(cnt):
|
self.key = key_iv[0:32]
|
||||||
resline = reslst[j]
|
self.iv = key_iv[32:48]
|
||||||
pp = resline.find('.kindle-info')
|
self.crp.set_decrypt_key(self.key, self.iv)
|
||||||
if pp >= 0:
|
|
||||||
kinfopath = resline
|
def decrypt(self, encryptedData):
|
||||||
break
|
cleartext = self.crp.decrypt(encryptedData)
|
||||||
if not os.path.isfile(kinfopath):
|
cleartext = decode(cleartext, charMap2)
|
||||||
raise DrmException('Error: .kindle-info file can not be found')
|
return cleartext
|
||||||
return open(kinfopath,'r')
|
|
||||||
else:
|
|
||||||
if not os.path.isfile(kinfoFile):
|
# Locate the .kindle-info files
|
||||||
raise DrmException('Error: kindle-info file can not be found')
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
return open(kInfoFile, 'r')
|
# first search for current .kindle-info files
|
||||||
|
home = os.getenv('HOME')
|
||||||
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
kinfopath = 'NONE'
|
||||||
|
found = False
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
|
# add any .rainier*-kinf files
|
||||||
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
|
# add any .kinf2011 files
|
||||||
|
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
|
||||||
|
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
||||||
|
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
|
||||||
|
out1, out2 = p1.communicate()
|
||||||
|
reslst = out1.split('\n')
|
||||||
|
for resline in reslst:
|
||||||
|
if os.path.isfile(resline):
|
||||||
|
kInfoFiles.append(resline)
|
||||||
|
found = True
|
||||||
|
if not found:
|
||||||
|
print('No kindle-info files have been found.')
|
||||||
|
return kInfoFiles
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
|
||||||
|
# database of keynames and values
|
||||||
|
def getDBfromFile(kInfoFile):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
|
DB = {}
|
||||||
|
cnt = 0
|
||||||
|
infoReader = open(kInfoFile, 'r')
|
||||||
|
hdr = infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
|
||||||
|
if data.find('[') != -1 :
|
||||||
|
|
||||||
|
# older style kindle-info file
|
||||||
|
cud = CryptUnprotectData()
|
||||||
|
items = data.split('[')
|
||||||
|
for item in items:
|
||||||
|
if item != '':
|
||||||
|
keyhash, rawdata = item.split(':')
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap2) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
if hdr == '/':
|
||||||
|
|
||||||
|
# else newer style .kinf file used by K4Mac >= 1.6.0
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
cud = CryptUnprotectDataV2()
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# the raw keyhash string is also used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
# "entropy" not used for K4Mac only K4PC
|
||||||
|
# entropy = SHA1(keyhash)
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the charMap5 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using charMap5 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using charMap5 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,charMap5)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# the latest .kinf2011 version for K4M 1.9.1
|
||||||
|
# put back the hdr char, it is needed
|
||||||
|
data = hdr + data
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# the headerblob is the encrypted information needed to build the entropy string
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, charMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
|
||||||
|
# now extract the pieces in the same way
|
||||||
|
# this version is different from K4PC it scales the build number by multipying by 735
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
|
||||||
|
|
||||||
|
cud = CryptUnprotectDataV3(entropy)
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
keyname = "unknown"
|
||||||
|
|
||||||
|
# unlike K4PC the keyhash is not used in generating entropy
|
||||||
|
# entropy = SHA1(keyhash) + added_entropy
|
||||||
|
# entropy = added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents) - largest prime number less than or equal to int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using testMap8 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = cud.decrypt(encryptedValue)
|
||||||
|
# print keyname
|
||||||
|
# print cleartext
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -1,34 +1,129 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
# K4PC Windows specific routines
|
# K4PC Windows specific routines
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
import sys, os
|
import sys, os, re
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
string_at, Structure, c_void_p, cast
|
string_at, Structure, c_void_p, cast
|
||||||
|
|
||||||
import _winreg as winreg
|
import _winreg as winreg
|
||||||
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
MAX_PATH = 255
|
MAX_PATH = 255
|
||||||
|
|
||||||
kernel32 = windll.kernel32
|
kernel32 = windll.kernel32
|
||||||
advapi32 = windll.advapi32
|
advapi32 = windll.advapi32
|
||||||
crypt32 = windll.crypt32
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
import traceback
|
||||||
|
|
||||||
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
# crypto digestroutines
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA256(message):
|
||||||
|
ctx = hashlib.sha256()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
# For K4PC 1.9.X
|
||||||
|
# need to use routines from openssl
|
||||||
|
# AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
|
||||||
|
# AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
|
||||||
|
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
|
||||||
|
# but the user may not have openssl installed or their version is a hacked one that was shipped
|
||||||
|
# with many ethernet cards that used software instead of hardware routines
|
||||||
|
# so using pure python implementations
|
||||||
|
from pbkdf2 import pbkdf2
|
||||||
|
import aescbc
|
||||||
|
|
||||||
|
def UnprotectHeaderData(encryptedData):
|
||||||
|
passwdData = 'header_key_data'
|
||||||
|
salt = 'HEADER.2011'
|
||||||
|
iter = 0x80
|
||||||
|
keylen = 0x100
|
||||||
|
key_iv = pbkdf2(passwdData, salt, iter, keylen)
|
||||||
|
key = key_iv[0:32]
|
||||||
|
iv = key_iv[32:48]
|
||||||
|
aes=aescbc.AES_CBC(key, aescbc.noPadding() ,32)
|
||||||
|
cleartext = aes.decrypt(iv + encryptedData)
|
||||||
|
return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# simple primes table (<= n) calculator
|
||||||
|
def primes(n):
|
||||||
|
if n==2: return [2]
|
||||||
|
elif n<2: return []
|
||||||
|
s=range(3,n+1,2)
|
||||||
|
mroot = n ** 0.5
|
||||||
|
half=(n+1)/2-1
|
||||||
|
i=0
|
||||||
|
m=3
|
||||||
|
while m <= mroot:
|
||||||
|
if s[i]:
|
||||||
|
j=(m*m-3)/2
|
||||||
|
s[j]=0
|
||||||
|
while j<half:
|
||||||
|
s[j]=0
|
||||||
|
j+=m
|
||||||
|
i=i+1
|
||||||
|
m=2*i+3
|
||||||
|
return [2]+[x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
# Various character maps used to decrypt kindle info values.
|
||||||
|
# Probably supposed to act as obfuscation
|
||||||
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
# New maps in K4PC 1.9.0
|
||||||
|
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# interface with Windows OS Routines
|
||||||
class DataBlob(Structure):
|
class DataBlob(Structure):
|
||||||
_fields_ = [('cbData', c_uint),
|
_fields_ = [('cbData', c_uint),
|
||||||
('pbData', c_void_p)]
|
('pbData', c_void_p)]
|
||||||
@@ -59,52 +154,276 @@ def GetVolumeSerialNumber():
|
|||||||
return GetVolumeSerialNumber
|
return GetVolumeSerialNumber
|
||||||
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
|
||||||
|
return GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def getLastError():
|
||||||
|
GetLastError = kernel32.GetLastError
|
||||||
|
GetLastError.argtypes = None
|
||||||
|
GetLastError.restype = c_uint
|
||||||
|
def getLastError():
|
||||||
|
return GetLastError()
|
||||||
|
return getLastError
|
||||||
|
getLastError = getLastError()
|
||||||
|
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
GetUserNameW = advapi32.GetUserNameW
|
GetUserNameW = advapi32.GetUserNameW
|
||||||
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
|
||||||
GetUserNameW.restype = c_uint
|
GetUserNameW.restype = c_uint
|
||||||
def GetUserName():
|
def GetUserName():
|
||||||
buffer = create_unicode_buffer(32)
|
buffer = create_unicode_buffer(2)
|
||||||
size = c_uint(len(buffer))
|
size = c_uint(len(buffer))
|
||||||
while not GetUserNameW(buffer, byref(size)):
|
while not GetUserNameW(buffer, byref(size)):
|
||||||
|
errcd = getLastError()
|
||||||
|
if errcd == 234:
|
||||||
|
# bad wine implementation up through wine 1.3.21
|
||||||
|
return "AlternateUserName"
|
||||||
buffer = create_unicode_buffer(len(buffer) * 2)
|
buffer = create_unicode_buffer(len(buffer) * 2)
|
||||||
size.value = len(buffer)
|
size.value = len(buffer)
|
||||||
return buffer.value.encode('utf-16-le')[::2]
|
return buffer.value.encode('utf-16-le')[::2]
|
||||||
return GetUserName
|
return GetUserName
|
||||||
GetUserName = GetUserName()
|
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
|
||||||
def CryptUnprotectData():
|
def CryptUnprotectData():
|
||||||
_CryptUnprotectData = crypt32.CryptUnprotectData
|
_CryptUnprotectData = crypt32.CryptUnprotectData
|
||||||
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
_CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
|
||||||
c_void_p, c_void_p, c_uint, DataBlob_p]
|
c_void_p, c_void_p, c_uint, DataBlob_p]
|
||||||
_CryptUnprotectData.restype = c_uint
|
_CryptUnprotectData.restype = c_uint
|
||||||
def CryptUnprotectData(indata, entropy):
|
def CryptUnprotectData(indata, entropy, flags):
|
||||||
indatab = create_string_buffer(indata)
|
indatab = create_string_buffer(indata)
|
||||||
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
indata = DataBlob(len(indata), cast(indatab, c_void_p))
|
||||||
entropyb = create_string_buffer(entropy)
|
entropyb = create_string_buffer(entropy)
|
||||||
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
|
||||||
outdata = DataBlob()
|
outdata = DataBlob()
|
||||||
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
if not _CryptUnprotectData(byref(indata), None, byref(entropy),
|
||||||
None, None, 0, byref(outdata)):
|
None, None, flags, byref(outdata)):
|
||||||
raise DrmException("Failed to Unprotect Data")
|
# raise DrmException("Failed to Unprotect Data")
|
||||||
|
return 'failed'
|
||||||
return string_at(outdata.pbData, outdata.cbData)
|
return string_at(outdata.pbData, outdata.cbData)
|
||||||
return CryptUnprotectData
|
return CryptUnprotectData
|
||||||
CryptUnprotectData = CryptUnprotectData()
|
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
#
|
|
||||||
# Locate and open the Kindle.info file.
|
# Locate all of the kindle-info style files and return as list
|
||||||
#
|
def getKindleInfoFiles(kInfoFiles):
|
||||||
def openKindleInfo(kInfoFile=None):
|
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
||||||
if kInfoFile == None:
|
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
||||||
regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
|
|
||||||
path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
|
# first look for older kindle-info files
|
||||||
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
kinfopath = path +'\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info'
|
||||||
if not os.path.isfile(kinfopath):
|
if not os.path.isfile(kinfopath):
|
||||||
raise DrmException('Error: kindle.info file can not be found')
|
print('No kindle.info files have not been found.')
|
||||||
return open(kinfopath,'r')
|
|
||||||
else:
|
else:
|
||||||
if not os.path.isfile(kInfoFile):
|
kInfoFiles.append(kinfopath)
|
||||||
raise DrmException('Error: kindle.info file can not be found')
|
|
||||||
return open(kInfoFile, 'r')
|
# now look for newer (K4PC 1.5.0 and later rainier.2.1.1.kinf file
|
||||||
|
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.5.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.6.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
|
||||||
|
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
|
||||||
|
if not os.path.isfile(kinfopath):
|
||||||
|
print('No K4PC 1.9.X .kinf files have not been found.')
|
||||||
|
else:
|
||||||
|
kInfoFiles.append(kinfopath)
|
||||||
|
|
||||||
|
return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
|
||||||
|
# database of keynames and values
|
||||||
|
def getDBfromFile(kInfoFile):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
|
DB = {}
|
||||||
|
cnt = 0
|
||||||
|
infoReader = open(kInfoFile, 'r')
|
||||||
|
hdr = infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
|
||||||
|
if data.find('{') != -1 :
|
||||||
|
|
||||||
|
# older style kindle-info file
|
||||||
|
items = data.split('{')
|
||||||
|
for item in items:
|
||||||
|
if item != '':
|
||||||
|
keyhash, rawdata = item.split(':')
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap2) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, "", 0)
|
||||||
|
cnt = cnt + 1
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
if hdr == '/':
|
||||||
|
# else rainier-2-1-1 .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the raw keyhash string is used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
# the charMap5 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using charMap5 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using Map5 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,charMap5)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# else newest .kinf2011 style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
# need to put back the first char read because it it part
|
||||||
|
# of the added entropy blob
|
||||||
|
data = hdr + data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# starts with and encoded and encrypted header blob
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, testMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
# now extract the pieces that form the added entropy
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
added_entropy = m.group(2) + m.group(4)
|
||||||
|
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the sha1 of raw keyhash string is used to create entropy along
|
||||||
|
# with the added entropy provided above from the headerblob
|
||||||
|
entropy = SHA1(keyhash) + added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
# key names now use the new testMap8 encoding
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using new testMap8 to get the original CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|||||||
@@ -11,16 +11,28 @@ from struct import pack, unpack, unpack_from
|
|||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
global kindleDatabase
|
|
||||||
global charMap1
|
global charMap1
|
||||||
global charMap2
|
|
||||||
global charMap3
|
global charMap3
|
||||||
global charMap4
|
global charMap4
|
||||||
|
|
||||||
if sys.platform.startswith('win'):
|
if 'calibre' in sys.modules:
|
||||||
from k4pcutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap2
|
inCalibre = True
|
||||||
if sys.platform.startswith('darwin'):
|
else:
|
||||||
from k4mutils import openKindleInfo, CryptUnprotectData, GetUserName, GetVolumeSerialNumber, charMap2
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
else:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
||||||
@@ -67,65 +79,6 @@ def decode(data,map):
|
|||||||
result += pack("B",value)
|
result += pack("B",value)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
# Parse the Kindle.info file and return the records as a list of key-values
|
|
||||||
def parseKindleInfo(kInfoFile):
|
|
||||||
DB = {}
|
|
||||||
infoReader = openKindleInfo(kInfoFile)
|
|
||||||
infoReader.read(1)
|
|
||||||
data = infoReader.read()
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
items = data.split('{')
|
|
||||||
else :
|
|
||||||
items = data.split('[')
|
|
||||||
for item in items:
|
|
||||||
splito = item.split(':')
|
|
||||||
DB[splito[0]] =splito[1]
|
|
||||||
return DB
|
|
||||||
|
|
||||||
# Get a record from the Kindle.info file for the key "hashedKey" (already hashed and encoded).
|
|
||||||
# Return the decoded and decrypted record
|
|
||||||
def getKindleInfoValueForHash(hashedKey):
|
|
||||||
global kindleDatabase
|
|
||||||
global charMap1
|
|
||||||
global charMap2
|
|
||||||
encryptedValue = decode(kindleDatabase[hashedKey],charMap2)
|
|
||||||
if sys.platform.startswith('win'):
|
|
||||||
return CryptUnprotectData(encryptedValue,"")
|
|
||||||
else:
|
|
||||||
cleartext = CryptUnprotectData(encryptedValue)
|
|
||||||
return decode(cleartext, charMap1)
|
|
||||||
|
|
||||||
# Get a record from the Kindle.info file for the string in "key" (plaintext).
|
|
||||||
# Return the decoded and decrypted record
|
|
||||||
def getKindleInfoValueForKey(key):
|
|
||||||
global charMap2
|
|
||||||
return getKindleInfoValueForHash(encodeHash(key,charMap2))
|
|
||||||
|
|
||||||
# Find if the original string for a hashed/encoded string is known.
|
|
||||||
# If so return the original string othwise return an empty string.
|
|
||||||
def findNameForHash(hash):
|
|
||||||
global charMap2
|
|
||||||
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
|
||||||
result = ""
|
|
||||||
for name in names:
|
|
||||||
if hash == encodeHash(name, charMap2):
|
|
||||||
result = name
|
|
||||||
break
|
|
||||||
return result
|
|
||||||
|
|
||||||
# Print all the records from the kindle.info file (option -i)
|
|
||||||
def printKindleInfo():
|
|
||||||
for record in kindleDatabase:
|
|
||||||
name = findNameForHash(record)
|
|
||||||
if name != "" :
|
|
||||||
print (name)
|
|
||||||
print ("--------------------------")
|
|
||||||
else :
|
|
||||||
print ("Unknown Record")
|
|
||||||
print getKindleInfoValueForHash(record)
|
|
||||||
print "\n"
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# PID generation routines
|
# PID generation routines
|
||||||
#
|
#
|
||||||
@@ -138,9 +91,9 @@ def getTwoBitsFromBitField(bitField,offset):
|
|||||||
|
|
||||||
# Returns the six bits at offset from a bit field
|
# Returns the six bits at offset from a bit field
|
||||||
def getSixBitsFromBitField(bitField,offset):
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
offset *= 3
|
offset *= 3
|
||||||
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
return value
|
return value
|
||||||
|
|
||||||
# 8 bits to six bits encoding from hash to generate PID string
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
def encodePID(hash):
|
def encodePID(hash):
|
||||||
@@ -168,8 +121,8 @@ def generatePidEncryptionTable() :
|
|||||||
def generatePidSeed(table,dsn) :
|
def generatePidSeed(table,dsn) :
|
||||||
value = 0
|
value = 0
|
||||||
for counter in range (0,4) :
|
for counter in range (0,4) :
|
||||||
index = (ord(dsn[counter]) ^ value) &0xFF
|
index = (ord(dsn[counter]) ^ value) &0xFF
|
||||||
value = (value >> 8) ^ table[index]
|
value = (value >> 8) ^ table[index]
|
||||||
return value
|
return value
|
||||||
|
|
||||||
# Generate the device PID
|
# Generate the device PID
|
||||||
@@ -188,7 +141,7 @@ def generateDevicePID(table,dsn,nbRoll):
|
|||||||
return pidAscii
|
return pidAscii
|
||||||
|
|
||||||
def crc32(s):
|
def crc32(s):
|
||||||
return (~binascii.crc32(s,-1))&0xFFFFFFFF
|
return (~binascii.crc32(s,-1))&0xFFFFFFFF
|
||||||
|
|
||||||
# convert from 8 digit PID to 10 digit PID with checksum
|
# convert from 8 digit PID to 10 digit PID with checksum
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
@@ -224,13 +177,11 @@ def pidFromSerial(s, l):
|
|||||||
|
|
||||||
# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
|
# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
|
||||||
def getKindlePid(pidlst, rec209, token, serialnum):
|
def getKindlePid(pidlst, rec209, token, serialnum):
|
||||||
|
# Compute book PID
|
||||||
if rec209 != None and token != None:
|
pidHash = SHA1(serialnum+rec209+token)
|
||||||
# Compute book PID
|
bookPID = encodePID(pidHash)
|
||||||
pidHash = SHA1(serialnum+rec209+token)
|
bookPID = checksumPid(bookPID)
|
||||||
bookPID = encodePID(pidHash)
|
pidlst.append(bookPID)
|
||||||
bookPID = checksumPid(bookPID)
|
|
||||||
pidlst.append(bookPID)
|
|
||||||
|
|
||||||
# compute fixed pid for old pre 2.5 firmware update pid as well
|
# compute fixed pid for old pre 2.5 firmware update pid as well
|
||||||
bookPID = pidFromSerial(serialnum, 7) + "*"
|
bookPID = pidFromSerial(serialnum, 7) + "*"
|
||||||
@@ -240,15 +191,15 @@ def getKindlePid(pidlst, rec209, token, serialnum):
|
|||||||
return pidlst
|
return pidlst
|
||||||
|
|
||||||
|
|
||||||
# Parse the EXTH header records and parse the Kindleinfo
|
# parse the Kindleinfo file to calculate the book pid.
|
||||||
# file to calculate the book pid.
|
|
||||||
|
|
||||||
def getK4Pids(pidlst, rec209, token, kInfoFile=None):
|
keynames = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
||||||
global kindleDatabase
|
|
||||||
|
def getK4Pids(pidlst, rec209, token, kInfoFile):
|
||||||
global charMap1
|
global charMap1
|
||||||
kindleDatabase = None
|
kindleDatabase = None
|
||||||
try:
|
try:
|
||||||
kindleDatabase = parseKindleInfo(kInfoFile)
|
kindleDatabase = getDBfromFile(kInfoFile)
|
||||||
except Exception, message:
|
except Exception, message:
|
||||||
print(message)
|
print(message)
|
||||||
kindleDatabase = None
|
kindleDatabase = None
|
||||||
@@ -257,17 +208,24 @@ def getK4Pids(pidlst, rec209, token, kInfoFile=None):
|
|||||||
if kindleDatabase == None :
|
if kindleDatabase == None :
|
||||||
return pidlst
|
return pidlst
|
||||||
|
|
||||||
# Get the Mazama Random number
|
try:
|
||||||
MazamaRandomNumber = getKindleInfoValueForKey("MazamaRandomNumber")
|
# Get the Mazama Random number
|
||||||
|
MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"]
|
||||||
|
|
||||||
# Get the HDD serial
|
# Get the kindle account token
|
||||||
encodedSystemVolumeSerialNumber = encodeHash(GetVolumeSerialNumber(),charMap1)
|
kindleAccountToken = kindleDatabase["kindle.account.tokens"]
|
||||||
|
except KeyError:
|
||||||
|
print "Keys not found in " + kInfoFile
|
||||||
|
return pidlst
|
||||||
|
|
||||||
|
# Get the ID string used
|
||||||
|
encodedIDString = encodeHash(GetIDString(),charMap1)
|
||||||
|
|
||||||
# Get the current user name
|
# Get the current user name
|
||||||
encodedUsername = encodeHash(GetUserName(),charMap1)
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
# concat, hash and encode to calculate the DSN
|
# concat, hash and encode to calculate the DSN
|
||||||
DSN = encode(SHA1(MazamaRandomNumber+encodedSystemVolumeSerialNumber+encodedUsername),charMap1)
|
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
|
||||||
|
|
||||||
# Compute the device PID (for which I can tell, is used for nothing).
|
# Compute the device PID (for which I can tell, is used for nothing).
|
||||||
table = generatePidEncryptionTable()
|
table = generatePidEncryptionTable()
|
||||||
@@ -275,13 +233,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile=None):
|
|||||||
devicePID = checksumPid(devicePID)
|
devicePID = checksumPid(devicePID)
|
||||||
pidlst.append(devicePID)
|
pidlst.append(devicePID)
|
||||||
|
|
||||||
# Compute book PID
|
# Compute book PIDs
|
||||||
if rec209 == None or token == None:
|
|
||||||
print "\nNo EXTH record type 209 or token - Perhaps not a K4 file?"
|
|
||||||
return pidlst
|
|
||||||
|
|
||||||
# Get the kindle account token
|
|
||||||
kindleAccountToken = getKindleInfoValueForKey("kindle.account.tokens")
|
|
||||||
|
|
||||||
# book pid
|
# book pid
|
||||||
pidHash = SHA1(DSN+kindleAccountToken+rec209+token)
|
pidHash = SHA1(DSN+kindleAccountToken+rec209+token)
|
||||||
@@ -305,8 +257,10 @@ def getK4Pids(pidlst, rec209, token, kInfoFile=None):
|
|||||||
|
|
||||||
def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
|
def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
|
||||||
pidlst = []
|
pidlst = []
|
||||||
|
if kInfoFiles is None:
|
||||||
|
kInfoFiles = []
|
||||||
if k4:
|
if k4:
|
||||||
pidlst = getK4Pids(pidlst, md1, md2)
|
kInfoFiles = getKindleInfoFiles(kInfoFiles)
|
||||||
for infoFile in kInfoFiles:
|
for infoFile in kInfoFiles:
|
||||||
pidlst = getK4Pids(pidlst, md1, md2, infoFile)
|
pidlst = getK4Pids(pidlst, md1, md2, infoFile)
|
||||||
for serialnum in serials:
|
for serialnum in serials:
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
@@ -44,8 +44,20 @@
|
|||||||
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
||||||
# 0.23 - fixed problem with older files with no EXTH section
|
# 0.23 - fixed problem with older files with no EXTH section
|
||||||
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
||||||
|
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
|
||||||
|
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
|
||||||
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
# 0.33 - Performance improvements for large files (concatenation)
|
||||||
|
# 0.34 - Performance improvements in decryption (libalfcrypto)
|
||||||
|
|
||||||
__version__ = '0.24'
|
__version__ = '0.34'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -62,6 +74,7 @@ sys.stdout=Unbuffered(sys.stdout)
|
|||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import binascii
|
import binascii
|
||||||
|
from alfcrypto import Pukall_Cipher
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -73,36 +86,37 @@ class DrmException(Exception):
|
|||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
return Pukall_Cipher().PC1(key,src,decryption)
|
||||||
sum2 = 0;
|
# sum1 = 0;
|
||||||
keyXorVal = 0;
|
# sum2 = 0;
|
||||||
if len(key)!=16:
|
# keyXorVal = 0;
|
||||||
print "Bad key length!"
|
# if len(key)!=16:
|
||||||
return None
|
# print "Bad key length!"
|
||||||
wkey = []
|
# return None
|
||||||
for i in xrange(8):
|
# wkey = []
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
# for i in xrange(8):
|
||||||
dst = ""
|
# wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
for i in xrange(len(src)):
|
# dst = ""
|
||||||
temp1 = 0;
|
# for i in xrange(len(src)):
|
||||||
byteXorVal = 0;
|
# temp1 = 0;
|
||||||
for j in xrange(8):
|
# byteXorVal = 0;
|
||||||
temp1 ^= wkey[j]
|
# for j in xrange(8):
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
# temp1 ^= wkey[j]
|
||||||
sum1 = (temp1*346)&0xFFFF
|
# sum2 = (sum2+j)*20021 + sum1
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
# sum1 = (temp1*346)&0xFFFF
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
# sum2 = (sum2+sum1)&0xFFFF
|
||||||
byteXorVal ^= temp1 ^ sum2
|
# temp1 = (temp1*20021+1)&0xFFFF
|
||||||
curByte = ord(src[i])
|
# byteXorVal ^= temp1 ^ sum2
|
||||||
if not decryption:
|
# curByte = ord(src[i])
|
||||||
keyXorVal = curByte * 257;
|
# if not decryption:
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
# keyXorVal = curByte * 257;
|
||||||
if decryption:
|
# curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
keyXorVal = curByte * 257;
|
# if decryption:
|
||||||
for j in xrange(8):
|
# keyXorVal = curByte * 257;
|
||||||
wkey[j] ^= keyXorVal;
|
# for j in xrange(8):
|
||||||
dst+=chr(curByte)
|
# wkey[j] ^= keyXorVal;
|
||||||
return dst
|
# dst+=chr(curByte)
|
||||||
|
# return dst
|
||||||
|
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
@@ -154,8 +168,12 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
|
self.mobi_data = ''
|
||||||
self.header = self.data_file[0:78]
|
self.header = self.data_file[0:78]
|
||||||
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
||||||
raise DrmException("invalid file format")
|
raise DrmException("invalid file format")
|
||||||
@@ -173,6 +191,7 @@ class MobiBook:
|
|||||||
# parse information from section 0
|
# parse information from section 0
|
||||||
self.sect = self.loadSection(0)
|
self.sect = self.loadSection(0)
|
||||||
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
||||||
|
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
|
||||||
|
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
print "Book has format: ", self.magic
|
print "Book has format: ", self.magic
|
||||||
@@ -182,14 +201,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if self.mobi_version < 7:
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -207,12 +227,25 @@ class MobiBook:
|
|||||||
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
||||||
content = exth[pos + 8: pos + size]
|
content = exth[pos + 8: pos + size]
|
||||||
self.meta_array[type] = content
|
self.meta_array[type] = content
|
||||||
|
# reset the text to speech flag and clipping limit, if present
|
||||||
|
if type == 401 and size == 9:
|
||||||
|
# set clipping limit to 100%
|
||||||
|
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
|
||||||
|
elif type == 404 and size == 9:
|
||||||
|
# make sure text to speech is enabled
|
||||||
|
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
|
||||||
|
# print type, size, content, content.encode('hex')
|
||||||
pos += size
|
pos += size
|
||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -223,21 +256,24 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = None
|
rec209 = ''
|
||||||
token = None
|
token = ''
|
||||||
if 209 in self.meta_array:
|
if 209 in self.meta_array:
|
||||||
rec209 = self.meta_array[209]
|
rec209 = self.meta_array[209]
|
||||||
data = rec209
|
data = rec209
|
||||||
# Parse the 209 data to find the the exth record with the token data.
|
# The 209 data comes in five byte groups. Interpret the last four bytes
|
||||||
# The last character of the 209 data points to the record with the token.
|
# of each group as a big endian unsigned integer to get a key value
|
||||||
# Always 208 from my experience, but I'll leave the logic in case that changes.
|
# if that key exists in the meta_array, append its contents to the token
|
||||||
for i in xrange(len(data)):
|
for i in xrange(0,len(data),5):
|
||||||
if ord(data[i]) != 0:
|
val, = struct.unpack('>I',data[i+1:i+5])
|
||||||
if self.meta_array[ord(data[i])] != None:
|
sval = self.meta_array.get(val,'')
|
||||||
token = self.meta_array[ord(data[i])]
|
token += sval
|
||||||
return rec209, token
|
return rec209, token
|
||||||
|
|
||||||
def patch(self, off, new):
|
def patch(self, off, new):
|
||||||
@@ -285,15 +321,29 @@ class MobiBook:
|
|||||||
break
|
break
|
||||||
return [found_key,pid]
|
return [found_key,pid]
|
||||||
|
|
||||||
|
def getMobiFile(self, outpath):
|
||||||
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
return self.data_file
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
|
self.mobi_data = self.data_file
|
||||||
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -308,8 +358,10 @@ class MobiBook:
|
|||||||
t1_keyvec = "QDCVEPMU675RUBSZ"
|
t1_keyvec = "QDCVEPMU675RUBSZ"
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
bookkey_data = self.sect[0x0E:0x0E+16]
|
bookkey_data = self.sect[0x0E:0x0E+16]
|
||||||
else:
|
elif self.mobi_version < 0:
|
||||||
bookkey_data = self.sect[0x90:0x90+16]
|
bookkey_data = self.sect[0x90:0x90+16]
|
||||||
|
else:
|
||||||
|
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
|
||||||
pid = "00000000"
|
pid = "00000000"
|
||||||
found_key = PC1(t1_keyvec, bookkey_data)
|
found_key = PC1(t1_keyvec, bookkey_data)
|
||||||
else :
|
else :
|
||||||
@@ -335,46 +387,56 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
new_data = self.data_file[:self.sections[1][0]]
|
mobidataList = []
|
||||||
|
mobidataList.append(self.data_file[:self.sections[1][0]])
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
new_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
mobidataList.append(decoded_data)
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
new_data += data[-extra_size:]
|
mobidataList.append(data[-extra_size:])
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
new_data += self.data_file[self.sections[self.records+1][0]:]
|
mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
|
||||||
self.data_file = new_data
|
self.mobi_data = "".join(mobidataList)
|
||||||
print "done"
|
print "done"
|
||||||
return self.data_file
|
return
|
||||||
|
|
||||||
def getUnencryptedBook(infile,pid):
|
def getUnencryptedBook(infile,pid):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook([pid])
|
book.processBook([pid])
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
def getUnencryptedBookWithList(infile,pidlist):
|
def getUnencryptedBookWithList(infile,pidlist):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook(pidlist)
|
book.processBook(pidlist)
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> <Comma separated list of PIDs to try>" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
else:
|
else:
|
||||||
infile = argv[1]
|
infile = argv[1]
|
||||||
outfile = argv[2]
|
outfile = argv[2]
|
||||||
pidlist = argv[3].split(',')
|
if len(argv) is 4:
|
||||||
|
pidlist = argv[3].split(',')
|
||||||
|
else:
|
||||||
|
pidlist = {}
|
||||||
try:
|
try:
|
||||||
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
||||||
file(outfile, 'wb').write(stripped_file)
|
file(outfile, 'wb').write(stripped_file)
|
||||||
|
|||||||
@@ -44,8 +44,18 @@
|
|||||||
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
||||||
# 0.23 - fixed problem with older files with no EXTH section
|
# 0.23 - fixed problem with older files with no EXTH section
|
||||||
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
||||||
|
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
|
||||||
|
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
|
||||||
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
|
||||||
__version__ = '0.24'
|
__version__ = '0.32'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -154,8 +164,12 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
|
self.mobi_data = ''
|
||||||
self.header = self.data_file[0:78]
|
self.header = self.data_file[0:78]
|
||||||
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
||||||
raise DrmException("invalid file format")
|
raise DrmException("invalid file format")
|
||||||
@@ -173,6 +187,7 @@ class MobiBook:
|
|||||||
# parse information from section 0
|
# parse information from section 0
|
||||||
self.sect = self.loadSection(0)
|
self.sect = self.loadSection(0)
|
||||||
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
||||||
|
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
|
||||||
|
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
print "Book has format: ", self.magic
|
print "Book has format: ", self.magic
|
||||||
@@ -182,14 +197,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if self.mobi_version < 7:
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -207,12 +223,25 @@ class MobiBook:
|
|||||||
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
||||||
content = exth[pos + 8: pos + size]
|
content = exth[pos + 8: pos + size]
|
||||||
self.meta_array[type] = content
|
self.meta_array[type] = content
|
||||||
|
# reset the text to speech flag and clipping limit, if present
|
||||||
|
if type == 401 and size == 9:
|
||||||
|
# set clipping limit to 100%
|
||||||
|
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
|
||||||
|
elif type == 404 and size == 9:
|
||||||
|
# make sure text to speech is enabled
|
||||||
|
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
|
||||||
|
# print type, size, content, content.encode('hex')
|
||||||
pos += size
|
pos += size
|
||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -223,21 +252,24 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = None
|
rec209 = ''
|
||||||
token = None
|
token = ''
|
||||||
if 209 in self.meta_array:
|
if 209 in self.meta_array:
|
||||||
rec209 = self.meta_array[209]
|
rec209 = self.meta_array[209]
|
||||||
data = rec209
|
data = rec209
|
||||||
# Parse the 209 data to find the the exth record with the token data.
|
# The 209 data comes in five byte groups. Interpret the last four bytes
|
||||||
# The last character of the 209 data points to the record with the token.
|
# of each group as a big endian unsigned integer to get a key value
|
||||||
# Always 208 from my experience, but I'll leave the logic in case that changes.
|
# if that key exists in the meta_array, append its contents to the token
|
||||||
for i in xrange(len(data)):
|
for i in xrange(0,len(data),5):
|
||||||
if ord(data[i]) != 0:
|
val, = struct.unpack('>I',data[i+1:i+5])
|
||||||
if self.meta_array[ord(data[i])] != None:
|
sval = self.meta_array.get(val,'')
|
||||||
token = self.meta_array[ord(data[i])]
|
token += sval
|
||||||
return rec209, token
|
return rec209, token
|
||||||
|
|
||||||
def patch(self, off, new):
|
def patch(self, off, new):
|
||||||
@@ -285,15 +317,29 @@ class MobiBook:
|
|||||||
break
|
break
|
||||||
return [found_key,pid]
|
return [found_key,pid]
|
||||||
|
|
||||||
|
def getMobiFile(self, outpath):
|
||||||
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
return self.data_file
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
|
self.mobi_data = self.data_file
|
||||||
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -308,8 +354,10 @@ class MobiBook:
|
|||||||
t1_keyvec = "QDCVEPMU675RUBSZ"
|
t1_keyvec = "QDCVEPMU675RUBSZ"
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
bookkey_data = self.sect[0x0E:0x0E+16]
|
bookkey_data = self.sect[0x0E:0x0E+16]
|
||||||
else:
|
elif self.mobi_version < 0:
|
||||||
bookkey_data = self.sect[0x90:0x90+16]
|
bookkey_data = self.sect[0x90:0x90+16]
|
||||||
|
else:
|
||||||
|
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
|
||||||
pid = "00000000"
|
pid = "00000000"
|
||||||
found_key = PC1(t1_keyvec, bookkey_data)
|
found_key = PC1(t1_keyvec, bookkey_data)
|
||||||
else :
|
else :
|
||||||
@@ -335,46 +383,54 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
new_data = self.data_file[:self.sections[1][0]]
|
self.mobi_data = self.data_file[:self.sections[1][0]]
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
new_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
self.mobi_data += decoded_data
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
new_data += data[-extra_size:]
|
self.mobi_data += data[-extra_size:]
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
new_data += self.data_file[self.sections[self.records+1][0]:]
|
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
|
||||||
self.data_file = new_data
|
|
||||||
print "done"
|
print "done"
|
||||||
return self.data_file
|
return
|
||||||
|
|
||||||
def getUnencryptedBook(infile,pid):
|
def getUnencryptedBook(infile,pid):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook([pid])
|
book.processBook([pid])
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
def getUnencryptedBookWithList(infile,pidlist):
|
def getUnencryptedBookWithList(infile,pidlist):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook(pidlist)
|
book.processBook(pidlist)
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> <Comma separated list of PIDs to try>" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
else:
|
else:
|
||||||
infile = argv[1]
|
infile = argv[1]
|
||||||
outfile = argv[2]
|
outfile = argv[2]
|
||||||
pidlist = argv[3].split(',')
|
if len(argv) is 4:
|
||||||
|
pidlist = argv[3].split(',')
|
||||||
|
else:
|
||||||
|
pidlist = {}
|
||||||
try:
|
try:
|
||||||
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
||||||
file(outfile, 'wb').write(stripped_file)
|
file(outfile, 'wb').write(stripped_file)
|
||||||
@@ -87,4 +87,3 @@ def load_libcrypto():
|
|||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,68 @@
|
|||||||
|
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
|
||||||
|
# for details. Basically, it derives a key from a password and salt.
|
||||||
|
|
||||||
|
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
|
||||||
|
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
|
||||||
|
# This code may be freely used and modified for any purpose.
|
||||||
|
|
||||||
|
# Revision history
|
||||||
|
# v0.1 October 2004 - Initial release
|
||||||
|
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
|
||||||
|
# v0.3 "" the correct digest_size rather than always 20
|
||||||
|
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
try:
|
||||||
|
# only in python 2.5
|
||||||
|
import hashlib
|
||||||
|
sha = hashlib.sha1
|
||||||
|
md5 = hashlib.md5
|
||||||
|
sha256 = hashlib.sha256
|
||||||
|
except ImportError: # pragma: NO COVERAGE
|
||||||
|
# fallback
|
||||||
|
import sha
|
||||||
|
import md5
|
||||||
|
|
||||||
|
# this is what you want to call.
|
||||||
|
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
|
||||||
|
try:
|
||||||
|
# depending whether the hashfn is from hashlib or sha/md5
|
||||||
|
digest_size = hashfn().digest_size
|
||||||
|
except TypeError: # pragma: NO COVERAGE
|
||||||
|
digest_size = hashfn.digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
|
||||||
|
h = hmac.new( password, None, hashfn )
|
||||||
|
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, itercount, i )
|
||||||
|
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise ValueError("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
# Helper as per the spec. h is a hmac which has been created seeded with the
|
||||||
|
# password, it will be copy()ed and not modified.
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
|
||||||
|
return T
|
||||||
@@ -6,6 +6,7 @@ import csv
|
|||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import getopt
|
import getopt
|
||||||
|
import re
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
@@ -81,6 +82,21 @@ class DocParser(object):
|
|||||||
pos = foundpos + 1
|
pos = foundpos + 1
|
||||||
return startpos
|
return startpos
|
||||||
|
|
||||||
|
# returns a vector of integers for the tagpath
|
||||||
|
def getData(self, tagpath, pos, end, clean=False):
|
||||||
|
if clean:
|
||||||
|
digits_only = re.compile(r'''([0-9]+)''')
|
||||||
|
argres=[]
|
||||||
|
(foundat, argt) = self.findinDoc(tagpath, pos, end)
|
||||||
|
if (argt != None) and (len(argt) > 0) :
|
||||||
|
argList = argt.split('|')
|
||||||
|
for strval in argList:
|
||||||
|
if clean:
|
||||||
|
m = re.search(digits_only, strval)
|
||||||
|
if m != None:
|
||||||
|
strval = m.group()
|
||||||
|
argres.append(int(strval))
|
||||||
|
return argres
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
@@ -237,7 +253,11 @@ def convert2CSS(flatxml, fontsize, ph, pw):
|
|||||||
|
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, fontsize, ph, pw)
|
dp = DocParser(flatxml, fontsize, ph, pw)
|
||||||
|
|
||||||
csspage = dp.process()
|
csspage = dp.process()
|
||||||
|
|
||||||
return csspage
|
return csspage
|
||||||
|
|
||||||
|
|
||||||
|
def getpageIDMap(flatxml):
|
||||||
|
dp = DocParser(flatxml, 0, 0, 0)
|
||||||
|
pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
|
||||||
|
return pageidnumbers
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ class Process(object):
|
|||||||
self.__stdout_thread = threading.Thread(
|
self.__stdout_thread = threading.Thread(
|
||||||
name="stdout-thread",
|
name="stdout-thread",
|
||||||
target=self.__reader, args=(self.__collected_outdata,
|
target=self.__reader, args=(self.__collected_outdata,
|
||||||
self.__process.stdout))
|
self.__process.stdout))
|
||||||
self.__stdout_thread.setDaemon(True)
|
self.__stdout_thread.setDaemon(True)
|
||||||
self.__stdout_thread.start()
|
self.__stdout_thread.start()
|
||||||
|
|
||||||
@@ -60,7 +60,7 @@ class Process(object):
|
|||||||
self.__stderr_thread = threading.Thread(
|
self.__stderr_thread = threading.Thread(
|
||||||
name="stderr-thread",
|
name="stderr-thread",
|
||||||
target=self.__reader, args=(self.__collected_errdata,
|
target=self.__reader, args=(self.__collected_errdata,
|
||||||
self.__process.stderr))
|
self.__process.stderr))
|
||||||
self.__stderr_thread.setDaemon(True)
|
self.__stderr_thread.setDaemon(True)
|
||||||
self.__stderr_thread.start()
|
self.__stderr_thread.start()
|
||||||
|
|
||||||
@@ -146,4 +146,3 @@ class Process(object):
|
|||||||
self.__quit = True
|
self.__quit = True
|
||||||
self.__inputsem.release()
|
self.__inputsem.release()
|
||||||
self.__lock.release()
|
self.__lock.release()
|
||||||
|
|
||||||
|
|||||||
@@ -10,18 +10,46 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
import os, csv, getopt
|
import os, csv, getopt
|
||||||
import zlib, zipfile, tempfile, shutil
|
import zlib, zipfile, tempfile, shutil
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
from alfcrypto import Topaz_Cipher
|
||||||
|
|
||||||
class TpzDRMError(Exception):
|
class TpzDRMError(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
import kgenpids
|
if inCalibre:
|
||||||
import genbook
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
from calibre_plugins.k4mobidedrm import genbook
|
||||||
|
else:
|
||||||
|
import kgenpids
|
||||||
|
import genbook
|
||||||
|
|
||||||
|
|
||||||
|
# recursive zip creation support routine
|
||||||
|
def zipUpDir(myzip, tdir, localname):
|
||||||
|
currentdir = tdir
|
||||||
|
if localname != "":
|
||||||
|
currentdir = os.path.join(currentdir,localname)
|
||||||
|
list = os.listdir(currentdir)
|
||||||
|
for file in list:
|
||||||
|
afilename = file
|
||||||
|
localfilePath = os.path.join(localname, afilename)
|
||||||
|
realfilePath = os.path.join(currentdir,file)
|
||||||
|
if os.path.isfile(realfilePath):
|
||||||
|
myzip.write(realfilePath, localfilePath)
|
||||||
|
elif os.path.isdir(realfilePath):
|
||||||
|
zipUpDir(myzip, tdir, localfilePath)
|
||||||
|
|
||||||
#
|
#
|
||||||
# Utility routines
|
# Utility routines
|
||||||
#
|
#
|
||||||
@@ -31,8 +59,8 @@ def bookReadEncodedNumber(fo):
|
|||||||
flag = False
|
flag = False
|
||||||
data = ord(fo.read(1))
|
data = ord(fo.read(1))
|
||||||
if data == 0xFF:
|
if data == 0xFF:
|
||||||
flag = True
|
flag = True
|
||||||
data = ord(fo.read(1))
|
data = ord(fo.read(1))
|
||||||
if data >= 0x80:
|
if data >= 0x80:
|
||||||
datax = (data & 0x7F)
|
datax = (data & 0x7F)
|
||||||
while data >= 0x80 :
|
while data >= 0x80 :
|
||||||
@@ -40,7 +68,7 @@ def bookReadEncodedNumber(fo):
|
|||||||
datax = (datax <<7) + (data & 0x7F)
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
data = datax
|
data = datax
|
||||||
if flag:
|
if flag:
|
||||||
data = -data
|
data = -data
|
||||||
return data
|
return data
|
||||||
|
|
||||||
# Get a length prefixed string from file
|
# Get a length prefixed string from file
|
||||||
@@ -54,25 +82,28 @@ def bookReadString(fo):
|
|||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
# Context initialisation for the Topaz Crypto
|
||||||
def topazCryptoInit(key):
|
def topazCryptoInit(key):
|
||||||
ctx1 = 0x0CAFFE19E
|
return Topaz_Cipher().ctx_init(key)
|
||||||
for keyChar in key:
|
|
||||||
keyByte = ord(keyChar)
|
# ctx1 = 0x0CAFFE19E
|
||||||
ctx2 = ctx1
|
# for keyChar in key:
|
||||||
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
# keyByte = ord(keyChar)
|
||||||
return [ctx1,ctx2]
|
# ctx2 = ctx1
|
||||||
|
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
||||||
|
# return [ctx1,ctx2]
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
# decrypt data with the context prepared by topazCryptoInit()
|
||||||
def topazCryptoDecrypt(data, ctx):
|
def topazCryptoDecrypt(data, ctx):
|
||||||
ctx1 = ctx[0]
|
return Topaz_Cipher().decrypt(data, ctx)
|
||||||
ctx2 = ctx[1]
|
# ctx1 = ctx[0]
|
||||||
plainText = ""
|
# ctx2 = ctx[1]
|
||||||
for dataChar in data:
|
# plainText = ""
|
||||||
dataByte = ord(dataChar)
|
# for dataChar in data:
|
||||||
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
# dataByte = ord(dataChar)
|
||||||
ctx2 = ctx1
|
# m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
||||||
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
# ctx2 = ctx1
|
||||||
plainText += chr(m)
|
# ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
||||||
return plainText
|
# plainText += chr(m)
|
||||||
|
# return plainText
|
||||||
|
|
||||||
# Decrypt data with the PID
|
# Decrypt data with the PID
|
||||||
def decryptRecord(data,PID):
|
def decryptRecord(data,PID):
|
||||||
@@ -110,9 +141,10 @@ def decryptDkeyRecords(data,PID):
|
|||||||
|
|
||||||
|
|
||||||
class TopazBook:
|
class TopazBook:
|
||||||
def __init__(self, filename, outdir):
|
def __init__(self, filename):
|
||||||
self.fo = file(filename, 'rb')
|
self.fo = file(filename, 'rb')
|
||||||
self.outdir = outdir
|
self.outdir = tempfile.mkdtemp()
|
||||||
|
# self.outdir = 'rawdat'
|
||||||
self.bookPayloadOffset = 0
|
self.bookPayloadOffset = 0
|
||||||
self.bookHeaderRecords = {}
|
self.bookHeaderRecords = {}
|
||||||
self.bookMetadata = {}
|
self.bookMetadata = {}
|
||||||
@@ -157,18 +189,22 @@ class TopazBook:
|
|||||||
raise TpzDRMError("Parse Error : Record Names Don't Match")
|
raise TpzDRMError("Parse Error : Record Names Don't Match")
|
||||||
flags = ord(self.fo.read(1))
|
flags = ord(self.fo.read(1))
|
||||||
nbRecords = ord(self.fo.read(1))
|
nbRecords = ord(self.fo.read(1))
|
||||||
|
# print nbRecords
|
||||||
for i in range (0,nbRecords) :
|
for i in range (0,nbRecords) :
|
||||||
record = [bookReadString(self.fo), bookReadString(self.fo)]
|
keyval = bookReadString(self.fo)
|
||||||
self.bookMetadata[record[0]] = record[1]
|
content = bookReadString(self.fo)
|
||||||
|
# print keyval
|
||||||
|
# print content
|
||||||
|
self.bookMetadata[keyval] = content
|
||||||
return self.bookMetadata
|
return self.bookMetadata
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
keysRecord = None
|
keysRecord = self.bookMetadata.get('keys','')
|
||||||
keysRecordRecord = None
|
keysRecordRecord = ''
|
||||||
if 'keys' in self.bookMetadata:
|
if keysRecord != '':
|
||||||
keysRecord = self.bookMetadata['keys']
|
keylst = keysRecord.split(',')
|
||||||
if keysRecord in self.bookMetadata:
|
for keyval in keylst:
|
||||||
keysRecordRecord = self.bookMetadata[keysRecord]
|
keysRecordRecord += self.bookMetadata.get(keyval,'')
|
||||||
return keysRecord, keysRecordRecord
|
return keysRecord, keysRecordRecord
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
@@ -313,21 +349,34 @@ class TopazBook:
|
|||||||
file(outputFile, 'wb').write(record)
|
file(outputFile, 'wb').write(record)
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
def getHTMLZip(self, zipname):
|
||||||
|
htmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'book.html'),'book.html')
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'book.opf'),'book.opf')
|
||||||
|
if os.path.isfile(os.path.join(self.outdir,'cover.jpg')):
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'cover.jpg'),'cover.jpg')
|
||||||
|
htmlzip.write(os.path.join(self.outdir,'style.css'),'style.css')
|
||||||
|
zipUpDir(htmlzip, self.outdir, 'img')
|
||||||
|
htmlzip.close()
|
||||||
|
|
||||||
def zipUpDir(myzip, tempdir,localname):
|
def getSVGZip(self, zipname):
|
||||||
currentdir = tempdir
|
svgzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
if localname != "":
|
svgzip.write(os.path.join(self.outdir,'index_svg.xhtml'),'index_svg.xhtml')
|
||||||
currentdir = os.path.join(currentdir,localname)
|
zipUpDir(svgzip, self.outdir, 'svg')
|
||||||
list = os.listdir(currentdir)
|
zipUpDir(svgzip, self.outdir, 'img')
|
||||||
for file in list:
|
svgzip.close()
|
||||||
afilename = file
|
|
||||||
localfilePath = os.path.join(localname, afilename)
|
|
||||||
realfilePath = os.path.join(currentdir,file)
|
|
||||||
if os.path.isfile(realfilePath):
|
|
||||||
myzip.write(realfilePath, localfilePath)
|
|
||||||
elif os.path.isdir(realfilePath):
|
|
||||||
zipUpDir(myzip, tempdir, localfilePath)
|
|
||||||
|
|
||||||
|
def getXMLZip(self, zipname):
|
||||||
|
xmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
||||||
|
targetdir = os.path.join(self.outdir,'xml')
|
||||||
|
zipUpDir(xmlzip, targetdir, '')
|
||||||
|
zipUpDir(xmlzip, self.outdir, 'img')
|
||||||
|
xmlzip.close()
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
if os.path.isdir(self.outdir):
|
||||||
|
pass
|
||||||
|
# shutil.rmtree(self.outdir, True)
|
||||||
|
|
||||||
def usage(progname):
|
def usage(progname):
|
||||||
print "Removes DRM protection from Topaz ebooks and extract the contents"
|
print "Removes DRM protection from Topaz ebooks and extract the contents"
|
||||||
@@ -379,58 +428,45 @@ def main(argv=sys.argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
tempdir = tempfile.mkdtemp()
|
|
||||||
|
|
||||||
tb = TopazBook(infile, tempdir)
|
tb = TopazBook(infile)
|
||||||
title = tb.getBookTitle()
|
title = tb.getBookTitle()
|
||||||
print "Processing Book: ", title
|
print "Processing Book: ", title
|
||||||
keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
|
keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
|
||||||
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
|
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
print "Decrypting Book"
|
||||||
tb.processBook(pidlst)
|
tb.processBook(pidlst)
|
||||||
|
|
||||||
|
print " Creating HTML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_nodrm' + '.htmlz')
|
||||||
|
tb.getHTMLZip(zipname)
|
||||||
|
|
||||||
|
print " Creating SVG ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
||||||
|
tb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
||||||
|
tb.getXMLZip(zipname)
|
||||||
|
|
||||||
|
# removing internal temporary directory of pieces
|
||||||
|
tb.cleanup()
|
||||||
|
|
||||||
except TpzDRMError, e:
|
except TpzDRMError, e:
|
||||||
print str(e)
|
print str(e)
|
||||||
print " Creating DeBug Full Zip Archive of Book"
|
# tb.cleanup()
|
||||||
zipname = os.path.join(outdir, bookname + '_debug' + '.zip')
|
|
||||||
myzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
zipUpDir(myzip, tempdir, '')
|
|
||||||
myzip.close()
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
print " Creating HTML ZIP Archive"
|
except Exception, e:
|
||||||
zipname = os.path.join(outdir, bookname + '_nodrm' + '.zip')
|
print str(e)
|
||||||
myzip1 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
# tb.cleanup
|
||||||
myzip1.write(os.path.join(tempdir,'book.html'),'book.html')
|
return 1
|
||||||
myzip1.write(os.path.join(tempdir,'book.opf'),'book.opf')
|
|
||||||
if os.path.isfile(os.path.join(tempdir,'cover.jpg')):
|
|
||||||
myzip1.write(os.path.join(tempdir,'cover.jpg'),'cover.jpg')
|
|
||||||
myzip1.write(os.path.join(tempdir,'style.css'),'style.css')
|
|
||||||
zipUpDir(myzip1, tempdir, 'img')
|
|
||||||
myzip1.close()
|
|
||||||
|
|
||||||
print " Creating SVG ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
|
|
||||||
myzip2 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
myzip2.write(os.path.join(tempdir,'index_svg.xhtml'),'index_svg.xhtml')
|
|
||||||
zipUpDir(myzip2, tempdir, 'svg')
|
|
||||||
zipUpDir(myzip2, tempdir, 'img')
|
|
||||||
myzip2.close()
|
|
||||||
|
|
||||||
print " Creating XML ZIP Archive"
|
|
||||||
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
|
|
||||||
myzip3 = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
|
|
||||||
targetdir = os.path.join(tempdir,'xml')
|
|
||||||
zipUpDir(myzip3, targetdir, '')
|
|
||||||
zipUpDir(myzip3, tempdir, 'img')
|
|
||||||
myzip3.close()
|
|
||||||
|
|
||||||
shutil.rmtree(tempdir, True)
|
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|||||||
@@ -13,13 +13,24 @@ _FILENAME_LEN_OFFSET = 26
|
|||||||
_EXTRA_LEN_OFFSET = 28
|
_EXTRA_LEN_OFFSET = 28
|
||||||
_FILENAME_OFFSET = 30
|
_FILENAME_OFFSET = 30
|
||||||
_MAX_SIZE = 64 * 1024
|
_MAX_SIZE = 64 * 1024
|
||||||
|
_MIMETYPE = 'application/epub+zip'
|
||||||
|
|
||||||
|
class ZipInfo(zipfile.ZipInfo):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
if 'compress_type' in kwargs:
|
||||||
|
compress_type = kwargs.pop('compress_type')
|
||||||
|
super(ZipInfo, self).__init__(*args, **kwargs)
|
||||||
|
self.compress_type = compress_type
|
||||||
|
|
||||||
class fixZip:
|
class fixZip:
|
||||||
def __init__(self, zinput, zoutput):
|
def __init__(self, zinput, zoutput):
|
||||||
|
self.ztype = 'zip'
|
||||||
|
if zinput.lower().find('.epub') >= 0 :
|
||||||
|
self.ztype = 'epub'
|
||||||
self.inzip = zipfile.ZipFile(zinput,'r')
|
self.inzip = zipfile.ZipFile(zinput,'r')
|
||||||
self.outzip = zipfile.ZipFile(zoutput,'w')
|
self.outzip = zipfile.ZipFile(zoutput,'w')
|
||||||
# open the input zip for reading only as a raw file
|
# open the input zip for reading only as a raw file
|
||||||
self.bzf = file(zinput,'rb')
|
self.bzf = file(zinput,'rb')
|
||||||
|
|
||||||
def getlocalname(self, zi):
|
def getlocalname(self, zi):
|
||||||
local_header_offset = zi.header_offset
|
local_header_offset = zi.header_offset
|
||||||
@@ -82,21 +93,28 @@ class fixZip:
|
|||||||
# and copy member over to output archive
|
# and copy member over to output archive
|
||||||
# if problems exist with local vs central filename, fix them
|
# if problems exist with local vs central filename, fix them
|
||||||
|
|
||||||
for zinfo in self.inzip.infolist():
|
# if epub write mimetype file first, with no compression
|
||||||
data = None
|
if self.ztype == 'epub':
|
||||||
nzinfo = zinfo
|
nzinfo = ZipInfo('mimetype', compress_type=zipfile.ZIP_STORED)
|
||||||
try:
|
self.outzip.writestr(nzinfo, _MIMETYPE)
|
||||||
data = self.inzip.read(zinfo.filename)
|
|
||||||
except zipfile.BadZipfile or zipfile.error:
|
|
||||||
local_name = self.getlocalname(zinfo)
|
|
||||||
data = self.getfiledata(zinfo)
|
|
||||||
nzinfo.filename = local_name
|
|
||||||
|
|
||||||
nzinfo.date_time = zinfo.date_time
|
# write the rest of the files
|
||||||
nzinfo.compress_type = zinfo.compress_type
|
for zinfo in self.inzip.infolist():
|
||||||
nzinfo.flag_bits = 0
|
if zinfo.filename != "mimetype" or self.ztype == '.zip':
|
||||||
nzinfo.internal_attr = 0
|
data = None
|
||||||
self.outzip.writestr(nzinfo,data)
|
nzinfo = zinfo
|
||||||
|
try:
|
||||||
|
data = self.inzip.read(zinfo.filename)
|
||||||
|
except zipfile.BadZipfile or zipfile.error:
|
||||||
|
local_name = self.getlocalname(zinfo)
|
||||||
|
data = self.getfiledata(zinfo)
|
||||||
|
nzinfo.filename = local_name
|
||||||
|
|
||||||
|
nzinfo.date_time = zinfo.date_time
|
||||||
|
nzinfo.compress_type = zinfo.compress_type
|
||||||
|
nzinfo.flag_bits = 0
|
||||||
|
nzinfo.internal_attr = 0
|
||||||
|
self.outzip.writestr(nzinfo,data)
|
||||||
|
|
||||||
self.bzf.close()
|
self.bzf.close()
|
||||||
self.inzip.close()
|
self.inzip.close()
|
||||||
@@ -110,14 +128,7 @@ def usage():
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def repairBook(infile, outfile):
|
||||||
if len(argv)!=3:
|
|
||||||
usage()
|
|
||||||
return 1
|
|
||||||
infile = None
|
|
||||||
outfile = None
|
|
||||||
infile = argv[1]
|
|
||||||
outfile = argv[2]
|
|
||||||
if not os.path.exists(infile):
|
if not os.path.exists(infile):
|
||||||
print "Error: Input Zip File does not exist"
|
print "Error: Input Zip File does not exist"
|
||||||
return 1
|
return 1
|
||||||
@@ -129,7 +140,15 @@ def main(argv=sys.argv):
|
|||||||
print "Error Occurred ", e
|
print "Error Occurred ", e
|
||||||
return 2
|
return 2
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
if len(argv)!=3:
|
||||||
|
usage()
|
||||||
|
return 1
|
||||||
|
infile = argv[1]
|
||||||
|
outfile = argv[2]
|
||||||
|
return repairBook(infile, outfile)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__' :
|
if __name__ == '__main__' :
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
48
DeDRM_Macintosh_Application/ReadMe_DeDRM.app.rtf
Normal file
48
DeDRM_Macintosh_Application/ReadMe_DeDRM.app.rtf
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
|
||||||
|
{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
|
||||||
|
{\colortbl;\red255\green255\blue255;}
|
||||||
|
\paperw11900\paperh16840\margl1440\margr1440\vieww10320\viewh9840\viewkind0
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
|
||||||
|
|
||||||
|
\f0\b\fs24 \cf0 ReadMe_DeDRM_X.X
|
||||||
|
\b0 \
|
||||||
|
\
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\qj\pardirnatural
|
||||||
|
\cf0 DeDRM_X.X is an AppleScript droplet that allows users to drag and drop ebooks or folders of ebooks onto the DeDRM droplet to have the DRM removed. It repackages the all the "tools" DeDRM python software in one easy to use program that remembers preferences and settings.\
|
||||||
|
\
|
||||||
|
It should work without manual configuration with Kindle for Mac ebooks and Adobe Adept epub and pdf ebooks.\
|
||||||
|
\
|
||||||
|
To remove the DRM from standalone Kindle ebooks, eReader pdb ebooks, Barnes and Noble epubs, and Mobipocket ebooks requires the user to double-click the DeDRM droplet and set some additional Preferences including:\
|
||||||
|
\
|
||||||
|
Kindle 16 digit Serial Number\
|
||||||
|
Barnes & Noble key files (bnepubkey.b64)\
|
||||||
|
eReader Social DRM: (Name:Last 8 digits of CC number)\
|
||||||
|
MobiPocket, Kindle for iPhone/iPad/iPodTouch 10 digit PID\
|
||||||
|
Location for DRM-free ebooks.\
|
||||||
|
\
|
||||||
|
Once these preferences have been set, the user can simply drag and drop ebooks onto the DeDRM droplet to remove the DRM.\
|
||||||
|
\
|
||||||
|
This program requires Mac OS X 10.5, 10.5 or 10.7 (Leopard, Snow Leopard or Lion)
\
|
||||||
|
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
|
||||||
|
\cf0 \
|
||||||
|
\
|
||||||
|
\
|
||||||
|
|
||||||
|
\b Installation\
|
||||||
|
|
||||||
|
\b0 \
|
||||||
|
1. From tools_vX.X\\DeDRM_Applications\\, double click on DeDRM_X.X.zip to extract its contents. \
|
||||||
|
\
|
||||||
|
2. Move the resulting DeDRM X.X.app AppleScript droplet to whereever you keep you other applications. (Typically your Applications folder.)\
|
||||||
|
\
|
||||||
|
3. Optionally drag it into your dock, to make it easily available.\
|
||||||
|
\
|
||||||
|
\
|
||||||
|
\
|
||||||
|
|
||||||
|
\b Use\
|
||||||
|
|
||||||
|
\b0 \
|
||||||
|
1. To set the preferences simply double-click the Applescript droplet in your Applications folder or click on its icon in your dock, and follow the instructions in the dialogs.\
|
||||||
|
\
|
||||||
|
2. Drag & Drop DRMed ebooks or folders containing DRMed ebooks onto the Application, either in your Applications folder, or the icon in your dock.}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
echo off
|
||||||
|
set PWD=%~dp0
|
||||||
|
cd /d %PWD%\DeDRM_lib && start /min python DeDRM_app.pyw %*
|
||||||
|
exit
|
||||||
590
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/DeDRM_app.pyw
Normal file
590
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/DeDRM_app.pyw
Normal file
@@ -0,0 +1,590 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os, os.path
|
||||||
|
sys.path.append(sys.path[0]+os.sep+'lib')
|
||||||
|
import shutil
|
||||||
|
import Tkinter
|
||||||
|
from Tkinter import *
|
||||||
|
import Tkconstants
|
||||||
|
import tkFileDialog
|
||||||
|
from scrolltextwidget import ScrolledText
|
||||||
|
from activitybar import ActivityBar
|
||||||
|
import subprocess
|
||||||
|
from subprocess import Popen, PIPE, STDOUT
|
||||||
|
import subasyncio
|
||||||
|
from subasyncio import Process
|
||||||
|
import re
|
||||||
|
import simpleprefs
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '5.0'
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class MainApp(Tk):
    """Hidden Tk root window that owns the preference store and the
    preference / conversion dialogs."""

    def __init__(self, apphome, dnd=False, filenames=[]):
        Tk.__init__(self)
        self.withdraw()
        self.dnd = dnd
        self.apphome = apphome
        # preference settings:
        # [dictionary key, file in preferences directory where info is stored]
        description = [['pids',    'pidlist.txt'],
                       ['serials', 'seriallist.txt'],
                       ['sdrms',   'sdrmlist.txt'],
                       ['outdir',  'outdir.txt']]
        self.po = simpleprefs.SimplePrefs('DeDRM', description)
        prefs = self.getPreferences()
        if self.dnd:
            # drag-and-drop mode: process the dropped files straight away
            self.cd = ConvDialog(self)
            self.cd.doit(prefs, filenames)
        else:
            # interactive mode: show the preferences dialog first
            self.pd = PrefsDialog(self, prefs)
            self.cd = ConvDialog(self)
            self.pd.show()

    def getPreferences(self):
        """Load stored preferences; try to extract an Adept key file into
        the preferences directory if one is not already there."""
        prefs = self.po.getPreferences()
        prefdir = prefs['dir']
        keyfile = os.path.join(prefdir, 'adeptkey.der')
        if not os.path.exists(keyfile):
            import ineptkey
            try:
                ineptkey.extractKeyfile(keyfile)
            except:
                # best-effort: extraction is optional and may fail legitimately
                pass
        return prefs

    def setPreferences(self, newprefs):
        """Copy any newly selected key files into the preferences
        directory, then persist the preference values."""
        prefdir = self.po.prefdir
        for key in ('adkfile', 'bnkfile', 'kinfofile'):
            if key in newprefs:
                srcpath = newprefs[key]
                if os.path.isfile(srcpath):
                    destpath = os.path.join(prefdir, os.path.basename(srcpath))
                    shutil.copyfile(srcpath, destpath)
        self.po.setPreferences(newprefs)
        return

    def alldone(self):
        """Called when every queued book has been processed."""
        if self.dnd:
            self.destroy()
        else:
            self.pd.enablebuttons()
||||||
|
class PrefsDialog(Toplevel):
    """Preferences window: collects DRM key files, PID / serial / eReader
    credential lists and the output folder, and can also launch processing
    of a single selected ebook."""

    def __init__(self, mainapp, prefs_array):
        Toplevel.__init__(self, mainapp)
        self.withdraw()
        self.protocol("WM_DELETE_WINDOW", self.withdraw)
        self.title("DeDRM")
        self.prefs_array = prefs_array
        self.status = Tkinter.Label(self, text='Setting Preferences')
        self.status.pack(fill=Tkconstants.X, expand=1)
        body = Tkinter.Frame(self)
        self.body = body
        body.pack(fill=Tkconstants.X, expand=1)
        sticky = Tkconstants.E + Tkconstants.W
        body.grid_columnconfigure(1, weight=2)

        # Adept (Adobe Digital Editions) key file
        Tkinter.Label(body, text='Adept Key file (adeptkey.der)').grid(row=0, sticky=Tkconstants.E)
        self.adkpath = Tkinter.Entry(body, width=50)
        self.adkpath.grid(row=0, column=1, sticky=sticky)
        prefdir = self.prefs_array['dir']
        keyfile = os.path.join(prefdir,'adeptkey.der')
        if os.path.isfile(keyfile):
            path = keyfile
            path = path.encode('utf-8')
            self.adkpath.insert(0, path)
        button = Tkinter.Button(body, text="...", command=self.get_adkpath)
        button.grid(row=0, column=2)

        # Barnes & Noble key file
        Tkinter.Label(body, text='Barnes and Noble Key file (bnepubkey.b64)').grid(row=1, sticky=Tkconstants.E)
        self.bnkpath = Tkinter.Entry(body, width=50)
        self.bnkpath.grid(row=1, column=1, sticky=sticky)
        prefdir = self.prefs_array['dir']
        keyfile = os.path.join(prefdir,'bnepubkey.b64')
        if os.path.isfile(keyfile):
            path = keyfile
            path = path.encode('utf-8')
            self.bnkpath.insert(0, path)
        button = Tkinter.Button(body, text="...", command=self.get_bnkpath)
        button.grid(row=1, column=2)

        # Optional extra kindle.info / .kinf file
        Tkinter.Label(body, text='Additional kindle.info or .kinf file').grid(row=2, sticky=Tkconstants.E)
        self.altinfopath = Tkinter.Entry(body, width=50)
        self.altinfopath.grid(row=2, column=1, sticky=sticky)
        prefdir = self.prefs_array['dir']
        path = ''
        infofile = os.path.join(prefdir,'kindle.info')
        ainfofile = os.path.join(prefdir,'.kinf')
        if os.path.isfile(infofile):
            path = infofile
        elif os.path.isfile(ainfofile):
            path = ainfofile
        path = path.encode('utf-8')
        self.altinfopath.insert(0, path)
        button = Tkinter.Button(body, text="...", command=self.get_altinfopath)
        button.grid(row=2, column=2)

        Tkinter.Label(body, text='PID list (10 characters, no spaces, comma separated)').grid(row=3, sticky=Tkconstants.E)
        self.pidnums = Tkinter.StringVar()
        self.pidinfo = Tkinter.Entry(body, width=50, textvariable=self.pidnums)
        if 'pids' in self.prefs_array:
            self.pidnums.set(self.prefs_array['pids'])
        self.pidinfo.grid(row=3, column=1, sticky=sticky)

        Tkinter.Label(body, text='Kindle Serial Number list (16 characters, no spaces, comma separated)').grid(row=4, sticky=Tkconstants.E)
        self.sernums = Tkinter.StringVar()
        self.serinfo = Tkinter.Entry(body, width=50, textvariable=self.sernums)
        if 'serials' in self.prefs_array:
            self.sernums.set(self.prefs_array['serials'])
        self.serinfo.grid(row=4, column=1, sticky=sticky)

        Tkinter.Label(body, text='eReader data list (name:last 8 digits on credit card, comma separated)').grid(row=5, sticky=Tkconstants.E)
        self.sdrmnums = Tkinter.StringVar()
        self.sdrminfo = Tkinter.Entry(body, width=50, textvariable=self.sdrmnums)
        if 'sdrms' in self.prefs_array:
            self.sdrmnums.set(self.prefs_array['sdrms'])
        self.sdrminfo.grid(row=5, column=1, sticky=sticky)

        Tkinter.Label(body, text="Output Folder (if blank, use input ebook's folder)").grid(row=6, sticky=Tkconstants.E)
        self.outpath = Tkinter.Entry(body, width=50)
        self.outpath.grid(row=6, column=1, sticky=sticky)
        if 'outdir' in self.prefs_array:
            dpath = self.prefs_array['outdir']
            dpath = dpath.encode('utf-8')
            self.outpath.insert(0, dpath)
        button = Tkinter.Button(body, text="...", command=self.get_outpath)
        button.grid(row=6, column=2)

        Tkinter.Label(body, text='').grid(row=7, column=0, columnspan=2, sticky=Tkconstants.N)

        Tkinter.Label(body, text='Alternatively Process an eBook').grid(row=8, column=0, columnspan=2, sticky=Tkconstants.N)

        Tkinter.Label(body, text='Select an eBook to Process*').grid(row=9, sticky=Tkconstants.E)
        self.bookpath = Tkinter.Entry(body, width=50)
        self.bookpath.grid(row=9, column=1, sticky=sticky)
        button = Tkinter.Button(body, text="...", command=self.get_bookpath)
        button.grid(row=9, column=2)

        Tkinter.Label(body, font=("Helvetica", "10", "italic"), text='*To DeDRM multiple ebooks simultaneously, set your preferences and quit.\nThen drag and drop ebooks or folders onto the DeDRM_Drop_Target').grid(row=10, column=1, sticky=Tkconstants.E)

        Tkinter.Label(body, text='').grid(row=11, column=0, columnspan=2, sticky=Tkconstants.E)

        buttons = Tkinter.Frame(self)
        buttons.pack()
        self.sbotton = Tkinter.Button(buttons, text="Set Prefs", width=14, command=self.setprefs)
        self.sbotton.pack(side=Tkconstants.LEFT)

        buttons.pack()
        self.pbotton = Tkinter.Button(buttons, text="Process eBook", width=14, command=self.doit)
        self.pbotton.pack(side=Tkconstants.LEFT)
        buttons.pack()
        self.qbotton = Tkinter.Button(buttons, text="Quit", width=14, command=self.quitting)
        self.qbotton.pack(side=Tkconstants.RIGHT)
        buttons.pack()

    def disablebuttons(self):
        """Grey out the action buttons while a conversion is running."""
        self.sbotton.configure(state='disabled')
        self.pbotton.configure(state='disabled')
        self.qbotton.configure(state='disabled')

    def enablebuttons(self):
        """Re-enable the action buttons once a conversion finishes."""
        self.sbotton.configure(state='normal')
        self.pbotton.configure(state='normal')
        self.qbotton.configure(state='normal')

    def show(self):
        self.deiconify()
        self.tkraise()

    def hide(self):
        self.withdraw()

    def get_outpath(self):
        """Ask the user for the output folder."""
        cpath = self.outpath.get()
        outpath = tkFileDialog.askdirectory(
            parent=None, title='Folder to Store Unencrypted file(s) into',
            initialdir=cpath, initialfile=None)
        if outpath:
            outpath = os.path.normpath(outpath)
            self.outpath.delete(0, Tkconstants.END)
            self.outpath.insert(0, outpath)
        return

    def get_adkpath(self):
        """Ask the user for an Adept key file."""
        cpath = self.adkpath.get()
        adkpath = tkFileDialog.askopenfilename(initialdir = cpath, parent=None, title='Select Adept Key file',
            defaultextension='.der', filetypes=[('Adept Key file', '.der'), ('All Files', '.*')])
        if adkpath:
            adkpath = os.path.normpath(adkpath)
            self.adkpath.delete(0, Tkconstants.END)
            self.adkpath.insert(0, adkpath)
        return

    def get_bnkpath(self):
        """Ask the user for a Barnes and Noble key file."""
        cpath = self.bnkpath.get()
        bnkpath = tkFileDialog.askopenfilename(initialdir = cpath, parent=None, title='Select Barnes and Noble Key file',
            defaultextension='.b64', filetypes=[('Barnes and Noble Key file', '.b64'), ('All Files', '.*')])
        if bnkpath:
            bnkpath = os.path.normpath(bnkpath)
            self.bnkpath.delete(0, Tkconstants.END)
            self.bnkpath.insert(0, bnkpath)
        return

    def get_altinfopath(self):
        """Ask the user for an alternative kindle.info / .kinf file."""
        cpath = self.altinfopath.get()
        # BUGFIX: a comma was missing between the ('Kindle KInf','.kinf') and
        # ('All Files','.*') entries, which made Python treat the first tuple
        # as a callable and raise TypeError when this dialog was opened.
        altinfopath = tkFileDialog.askopenfilename(parent=None, title='Select Alternative kindle.info or .kinf File',
            defaultextension='.info', filetypes=[('Kindle Info', '.info'), ('Kindle KInf', '.kinf'), ('All Files', '.*')],
            initialdir=cpath)
        if altinfopath:
            altinfopath = os.path.normpath(altinfopath)
            self.altinfopath.delete(0, Tkconstants.END)
            self.altinfopath.insert(0, altinfopath)
        return

    def get_bookpath(self):
        """Ask the user for a single ebook to process."""
        cpath = self.bookpath.get()
        bookpath = tkFileDialog.askopenfilename(parent=None, title='Select eBook for DRM Removal',
            filetypes=[('ePub Files','.epub'),
                       ('Kindle','.azw'),
                       ('Kindle','.azw1'),
                       ('Kindle','.azw4'),
                       ('Kindle','.tpz'),
                       ('Kindle','.mobi'),
                       ('Kindle','.prc'),
                       ('eReader','.pdb'),
                       ('PDF','.pdf'),
                       ('All Files', '.*')],
            initialdir=cpath)
        if bookpath:
            bookpath = os.path.normpath(bookpath)
            self.bookpath.delete(0, Tkconstants.END)
            self.bookpath.insert(0, bookpath)
        return

    def quitting(self):
        self.master.destroy()

    def setprefs(self):
        """Collect the current widget values and store them as preferences."""
        new_prefs = {}
        prefdir = self.prefs_array['dir']
        new_prefs['dir'] = prefdir
        new_prefs['pids'] = self.pidinfo.get().strip()
        new_prefs['serials'] = self.serinfo.get().strip()
        new_prefs['sdrms'] = self.sdrminfo.get().strip()
        new_prefs['outdir'] = self.outpath.get().strip()
        # only hand over key files that are not already in the prefs dir;
        # MainApp.setPreferences will copy them in
        adkpath = self.adkpath.get()
        if os.path.dirname(adkpath) != prefdir:
            new_prefs['adkfile'] = adkpath
        bnkpath = self.bnkpath.get()
        if os.path.dirname(bnkpath) != prefdir:
            new_prefs['bnkfile'] = bnkpath
        altinfopath = self.altinfopath.get()
        if os.path.dirname(altinfopath) != prefdir:
            new_prefs['kinfofile'] = altinfopath
        self.master.setPreferences(new_prefs)

    def doit(self):
        """Process the single ebook named in the book path entry."""
        self.disablebuttons()
        filenames=[]
        bookpath = self.bookpath.get()
        bookpath = os.path.abspath(bookpath)
        filenames.append(bookpath)
        self.master.cd.doit(self.prefs_array,filenames)
|
||||||
|
class ConvDialog(Toplevel):
    """Progress window: runs each queued ebook through the appropriate
    decryption helper as a subprocess, shows its output and tallies
    successes/failures."""

    def __init__(self, master, prefs_array={}, filenames=[]):
        Toplevel.__init__(self, master)
        self.withdraw()
        self.protocol("WM_DELETE_WINDOW", self.withdraw)
        self.title("DeDRM Processing")
        self.master = master
        self.apphome = self.master.apphome
        self.prefs_array = prefs_array
        self.filenames = filenames
        self.interval = 50          # subprocess poll interval in ms
        self.p2 = None              # currently running subprocess, if any
        self.running = 'inactive'   # 'inactive' | 'active' | 'stopped'
        self.numgood = 0
        self.numbad = 0
        self.log = ''
        self.status = Tkinter.Label(self, text='DeDRM processing...')
        self.status.pack(fill=Tkconstants.X, expand=1)
        body = Tkinter.Frame(self)
        body.pack(fill=Tkconstants.X, expand=1)
        sticky = Tkconstants.E + Tkconstants.W
        body.grid_columnconfigure(1, weight=2)

        Tkinter.Label(body, text='Activity Bar').grid(row=0, sticky=Tkconstants.E)
        self.bar = ActivityBar(body, length=80, height=15, barwidth=5)
        self.bar.grid(row=0, column=1, sticky=sticky)

        msg1 = ''
        self.stext = ScrolledText(body, bd=5, relief=Tkconstants.RIDGE, height=4, width=80, wrap=Tkconstants.WORD)
        self.stext.grid(row=2, column=0, columnspan=2,sticky=sticky)
        self.stext.insert(Tkconstants.END,msg1)

        buttons = Tkinter.Frame(self)
        buttons.pack()
        self.qbutton = Tkinter.Button(buttons, text="Quit", width=14, command=self.quitting)
        self.qbutton.pack(side=Tkconstants.BOTTOM)
        self.status['text'] = ''

    def show(self):
        self.deiconify()
        self.tkraise()

    def hide(self):
        self.withdraw()

    def doit(self, prefs, filenames):
        """Start processing the given list of files with the given prefs."""
        self.running = 'inactive'
        self.prefs_array = prefs
        self.filenames = filenames
        self.show()
        self.processBooks()

    def conversion_done(self):
        self.hide()
        self.master.alldone()

    def processBooks(self):
        """Pop filenames off the queue one at a time, starting a decryption
        subprocess for each; when the queue is empty, report the totals."""
        while self.running == 'inactive':
            rscpath = self.prefs_array['dir']
            filename = None
            if len(self.filenames) > 0:
                filename = self.filenames.pop(0)
            if filename == None:
                msg = '\nComplete: '
                msg += 'Successes: %d, ' % self.numgood
                msg += 'Failures: %d\n' % self.numbad
                self.showCmdOutput(msg)
                if self.numbad == 0:
                    # BUGFIX: pass the callable instead of calling it --
                    # the original 'self.after(2000, self.conversion_done())'
                    # invoked conversion_done() immediately rather than
                    # after the intended 2 second delay.
                    self.after(2000, self.conversion_done)
                logfile = os.path.join(rscpath,'dedrm.log')
                # write and explicitly close the session log
                logf = open(logfile,'w')
                logf.write(self.log)
                logf.close()
                return
            infile = filename
            bname = os.path.basename(infile)
            msg = 'Processing: ' + bname + ' ... '
            self.log += msg
            self.showCmdOutput(msg)
            # default output folder is the input ebook's folder
            outdir = os.path.dirname(filename)
            if 'outdir' in self.prefs_array:
                dpath = self.prefs_array['outdir']
                if dpath.strip() != '':
                    outdir = dpath
            rv = self.decrypt_ebook(infile, outdir, rscpath)
            if rv == 0:
                self.bar.start()
                self.running = 'active'
                self.processPipe()
            else:
                msg = 'Unknown File: ' + bname + '\n'
                self.log += msg
                self.showCmdOutput(msg)
                self.numbad += 1

    def quitting(self):
        """Kill any still running subprocess, then shut everything down."""
        self.running = 'stopped'
        if self.p2 != None:
            if (self.p2.wait('nowait') == None):
                self.p2.terminate()
        self.conversion_done()

    # post output from subprocess in scrolled text widget
    def showCmdOutput(self, msg):
        """Append a message to the scrolled output widget."""
        if msg and msg !='':
            msg = msg.encode('utf-8')
            if sys.platform.startswith('win'):
                msg = msg.replace('\r\n','\n')
            self.stext.insert(Tkconstants.END,msg)
            self.stext.yview_pickplace(Tkconstants.END)
        return

    # read from subprocess pipe without blocking
    # invoked every interval via the widget "after"
    # option being used, so need to reset it for the next time
    def processPipe(self):
        """Poll the running subprocess; on exit, record the outcome and
        schedule the next book, otherwise re-arm the poll timer."""
        if self.p2 == None:
            # nothing to wait for so just return
            return
        poll = self.p2.wait('nowait')
        if poll != None:
            self.bar.stop()
            if poll == 0:
                msg = 'Success\n'
                self.numgood += 1
                text = self.p2.read()
                text += self.p2.readerr()
                self.log += text
                self.log += msg
            if poll != 0:
                msg = 'Failed\n'
                text = self.p2.read()
                text += self.p2.readerr()
                msg += text
                msg += '\n'
                self.numbad += 1
                self.log += msg
            self.showCmdOutput(msg)
            self.p2 = None
            self.running = 'inactive'
            self.after(50,self.processBooks)
            return
        # make sure we get invoked again by event loop after interval
        self.stext.after(self.interval,self.processPipe)
        return

    def decrypt_ebook(self, infile, outdir, rscpath):
        """Pick the helper for the file extension and launch it.
        Returns 0 when a subprocess was started, 1 for unknown file types."""
        apphome = self.apphome
        rv = 1
        name, ext = os.path.splitext(os.path.basename(infile))
        ext = ext.lower()
        if ext == '.epub':
            self.p2 = processEPUB(apphome, infile, outdir, rscpath)
            return 0
        if ext == '.pdb':
            self.p2 = processPDB(apphome, infile, outdir, rscpath)
            return 0
        if ext in ['.azw', '.azw1', '.azw4', '.prc', '.mobi', '.tpz']:
            self.p2 = processK4MOBI(apphome, infile, outdir, rscpath)
            return 0
        if ext == '.pdf':
            self.p2 = processPDF(apphome, infile, outdir, rscpath)
            return 0
        return rv
||||||
|
|
||||||
|
# run as a subprocess via pipes and collect stdout, stderr, and return value
|
||||||
|
# run as a subprocess via pipes and collect stdout, stderr, and return value
def runit(apphome, ncmd, nparms):
    """Launch the helper script *ncmd* (relative to *apphome*) with the
    already-quoted argument string *nparms*, asynchronously via pipes."""
    scriptpath = os.path.join(apphome, ncmd)
    cmdline = 'python "' + scriptpath + '" '
    if sys.platform.startswith('win'):
        if 'python' not in os.environ['PATH'].lower():
            # if no python hope that win registry finds what is associated with py extension
            cmdline = '"' + scriptpath + '" '
    cmdline += nparms
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    # NOTE(review): shell=True with hand-quoted paths -- filenames containing
    # double quotes would break the quoting; inherited from the original design
    return subasyncio.Process(cmdline, shell=True, stdin=None,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              close_fds=False)
||||||
|
def processK4MOBI(apphome, infile, outdir, rscpath):
    """Build the k4mobidedrm command line from the stored PID list, serial
    list and any kindle info files, then launch it as a subprocess."""
    cmd = os.path.join('lib','k4mobidedrm.py')
    parms = ''

    pidspath = os.path.join(rscpath,'pidlist.txt')
    if os.path.exists(pidspath):
        pidnums = open(pidspath,'r').read().rstrip(os.linesep)
        if pidnums != '':
            parms += '-p "' + pidnums + '" '

    serialnumspath = os.path.join(rscpath,'seriallist.txt')
    if os.path.exists(serialnumspath):
        serialnums = open(serialnumspath,'r').read().rstrip(os.linesep)
        if serialnums != '':
            parms += '-s "' + serialnums + '" '

    # pass along every kindle.info / .kinf file found in the prefs directory
    filefilter = re.compile("\.info$|\.kinf$", re.IGNORECASE)
    for filename in os.listdir(rscpath):
        if filefilter.search(filename):
            parms += '-k "' + os.path.join(rscpath,filename) + '" '

    parms += '"' + infile +'" "' + outdir + '"'
    return runit(apphome, cmd, parms)
|
|
||||||
|
def processPDF(apphome, infile, outdir, rscpath):
    """Launch the PDF decryption helper script as a subprocess."""
    script = os.path.join('lib','decryptpdf.py')
    args = '"' + infile + '" "' + outdir + '" "' + rscpath + '"'
    return runit(apphome, script, args)
|
|
||||||
|
def processEPUB(apphome, infile, outdir, rscpath):
    """Launch the ePub decryption helper; the script itself checks both
    Adept and Barnes and Noble encryption."""
    script = os.path.join('lib','decryptepub.py')
    args = '"' + infile + '" "' + outdir + '" "' + rscpath + '"'
    return runit(apphome, script, args)
|
|
||||||
|
def processPDB(apphome, infile, outdir, rscpath):
    """Launch the eReader PDB decryption helper script as a subprocess."""
    script = os.path.join('lib','decryptpdb.py')
    args = '"' + infile + '" "' + outdir + '" "' + rscpath + '"'
    return runit(apphome, script, args)
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
    """Entry point: gather any files/folders passed on the command line
    (drag-and-drop mode) and start the Tk application."""
    apphome = os.path.abspath(os.path.dirname(sys.argv[0]))

    # windows may pass a spurious quoted null string as argv[1] from bat file
    # simply work around this until we can figure out a better way to handle things
    if len(argv) == 2 and argv[1].strip('"').strip() == '':
        argv.pop()

    dnd = len(argv) > 1     # any arguments means drag-and-drop processing
    filenames = []
    if dnd:
        # build a list of the files to be processed
        for infile in argv[1:]:
            infile = os.path.abspath(infile.replace('"',''))
            if os.path.isdir(infile):
                # expand a dropped folder into its (non-hidden) files
                for afile in os.listdir(infile):
                    if afile.startswith('.'):
                        continue
                    filepath = os.path.join(infile, afile)
                    if os.path.isfile(filepath):
                        filenames.append(filepath)
            elif not os.path.basename(infile).startswith('.') and os.path.isfile(infile):
                filenames.append(infile)

    # start up gui app
    app = MainApp(apphome, dnd, filenames)
    app.mainloop()
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||||
@@ -0,0 +1,75 @@
|
|||||||
|
import sys
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
|
||||||
|
class ActivityBar(Tkinter.Frame):
    """An indeterminate progress indicator: a small filled bar that
    marches across a canvas while a task is running."""

    def __init__(self, master, length=300, height=20, barwidth=15, interval=50, bg='white', fillcolor='orchid1',\
                 bd=2, relief=Tkconstants.GROOVE, *args, **kw):
        Tkinter.Frame.__init__(self, master, bg=bg, width=length, height=height, *args, **kw)
        self._master = master
        self._interval = interval       # ms between animation steps
        self._maximum = length          # usable width in pixels
        self._startx = 0                # left edge of the moving bar
        self._barwidth = barwidth
        # remember the length/barwidth ratio so resizes keep proportions
        self._bardiv = length / barwidth
        if self._bardiv < 10:
            self._bardiv = 10
        self._canv = Tkinter.Canvas(self, bg=self['bg'], width=self['width'], height=self['height'],\
                                    highlightthickness=0, relief=relief, bd=bd)
        self._canv.pack(fill='both', expand=1)
        self._rect = self._canv.create_rectangle(0, 0, self._canv.winfo_reqwidth(), self._canv.winfo_reqheight(), fill=fillcolor, width=0)

        self._set()
        self.bind('<Configure>', self._update_coords)
        self._running = False

    def _update_coords(self, event):
        '''Updates the position of the rectangle inside the canvas when the size of
        the widget gets changed.'''
        # looks like we have to call update_idletasks() twice to make sure
        # to get the results we expect
        self._canv.update_idletasks()
        self._maximum = self._canv.winfo_width()
        self._startx = 0
        self._barwidth = self._maximum / self._bardiv
        if self._barwidth < 2:
            self._barwidth = 2
        stopx = min(self._startx + self._barwidth, self._maximum)
        self._canv.coords(self._rect, 0, 0, stopx, self._canv.winfo_height())
        self._canv.update_idletasks()

    def _set(self):
        """Draw the bar at the current position, wrapping at the right edge."""
        if self._startx < 0:
            self._startx = 0
        if self._startx > self._maximum:
            self._startx = self._startx % self._maximum
        stopx = min(self._startx + self._barwidth, self._maximum)
        self._canv.coords(self._rect, self._startx, 0, stopx, self._canv.winfo_height())
        self._canv.update_idletasks()

    def start(self):
        """Begin animating the bar."""
        self._running = True
        self.after(self._interval, self._step)

    def stop(self):
        """Stop animating and freeze the bar in place."""
        self._running = False
        self._set()

    def _step(self):
        """Advance the bar one step and reschedule while still running."""
        if self._running:
            stepsize = self._barwidth / 4
            if stepsize < 2:
                stepsize = 2
            self._startx += stepsize
            self._set()
            self.after(self._interval, self._step)
||||||
568
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/aescbc.py
Normal file
568
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/aescbc.py
Normal file
@@ -0,0 +1,568 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
"""
|
||||||
|
Routines for doing AES CBC in one file
|
||||||
|
|
||||||
|
Modified by some_updates to extract
|
||||||
|
and combine only those parts needed for AES CBC
|
||||||
|
into one simple to add python file
|
||||||
|
|
||||||
|
Original Version
|
||||||
|
Copyright (c) 2002 by Paul A. Lambert
|
||||||
|
Under:
|
||||||
|
CryptoPy Artisitic License Version 1.0
|
||||||
|
See the wonderful pure python package cryptopy-1.2.5
|
||||||
|
and read its LICENSE.txt for complete license details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CryptoError(Exception):
    """ Base class for crypto exceptions """
    def __init__(self, errorMessage='Error!'):
        self.message = errorMessage

    def __str__(self):
        return self.message


class InitCryptoError(CryptoError):
    """ Crypto errors during algorithm initialization """


class BadKeySizeError(InitCryptoError):
    """ Bad key size error """


class EncryptError(CryptoError):
    """ Error in encryption processing """


class DecryptError(CryptoError):
    """ Error in decryption processing """


class DecryptNotBlockAlignedError(DecryptError):
    """ Error in decryption processing """
||||||
|
def xorS(a,b):
    """ XOR two equal-length strings, returning the combined string """
    assert len(a)==len(b)
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
||||||
|
def xor(a,b):
    """ XOR two strings, truncating to the length of the shorter one """
    # zip stops at the shorter input, matching the original min() loop
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
|
||||||
|
"""
|
||||||
|
Base 'BlockCipher' and Pad classes for cipher instances.
|
||||||
|
BlockCipher supports automatic padding and type conversion. The BlockCipher
|
||||||
|
class was written to make the actual algorithm code more readable and
|
||||||
|
not for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class BlockCipher:
    """ Block ciphers: handles buffering, padding and block-by-block
    dispatch to the encryptBlock/decryptBlock methods supplied by a
    subclass (which must also define self.blockSize and self.padding). """
    def __init__(self):
        self.reset()

    def reset(self):
        self.resetEncrypt()
        self.resetDecrypt()
    def resetEncrypt(self):
        self.encryptBlockCount = 0
        self.bytesToEncrypt = ''
    def resetDecrypt(self):
        self.decryptBlockCount = 0
        self.bytesToDecrypt = ''

    def encrypt(self, plainText, more = None):
        """ Encrypt a string and return a binary string.
        Pass more=1 on all but the final call to stream data in chunks. """
        self.bytesToEncrypt += plainText  # append plainText to any bytes from prior encrypt
        numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
        cipherText = ''
        for i in range(numBlocks):
            bStart = i*self.blockSize
            ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
            self.encryptBlockCount += 1
            cipherText += ctBlock
        if numExtraBytes > 0:        # save any bytes that are not block aligned
            self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
        else:
            self.bytesToEncrypt = ''

        if more == None:   # no more data expected from caller
            finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
            if len(finalBytes) > 0:
                ctBlock = self.encryptBlock(finalBytes)
                self.encryptBlockCount += 1
                cipherText += ctBlock
            self.resetEncrypt()
        return cipherText

    def decrypt(self, cipherText, more = None):
        """ Decrypt a string and return a string.
        Pass more=1 on all but the final call to stream data in chunks. """
        self.bytesToDecrypt += cipherText  # append to any bytes from prior decrypt

        numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
        if more == None:  # no more calls to decrypt, should have all the data
            if numExtraBytes  != 0:
                # py2/py3-compatible raise (was py2-only "raise X, msg")
                raise DecryptNotBlockAlignedError('Data not block aligned on decrypt')

        # hold back some bytes in case last decrypt has zero len
        if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
            numBlocks -= 1
            numExtraBytes = self.blockSize

        plainText = ''
        for i in range(numBlocks):
            bStart = i*self.blockSize
            ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
            self.decryptBlockCount += 1
            plainText += ptBlock

        if numExtraBytes > 0:        # save any bytes that are not block aligned
            # BUGFIX: this previously assigned to self.bytesToEncrypt, so
            # leftover ciphertext was never consumed and streamed decrypts
            # dropped/duplicated data; the leftover belongs in the decrypt buffer.
            self.bytesToDecrypt = self.bytesToDecrypt[-numExtraBytes:]
        else:
            self.bytesToDecrypt = ''

        if more == None:         # last decrypt remove padding
            plainText = self.padding.removePad(plainText, self.blockSize)
            self.resetDecrypt()
        return plainText
|
||||||
|
class Pad:
    """ Base class for the padding strategies used by BlockCipher. """
    def __init__(self):
        # eventually could put in calculation of min and max size extension
        pass
|
||||||
|
class padWithPadLen(Pad):
    """ Pad a binary string with bytes whose value is the pad length
    (PKCS#5/7 style for the given block size). """

    def addPad(self, extraBytes, blockSize):
        """ Add padding to a binary string to make it an even multiple
        of the block size """
        blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
        padLength = blockSize - numExtraBytes   # always 1..blockSize bytes
        return extraBytes + padLength*chr(padLength)

    def removePad(self, paddedBinaryString, blockSize):
        """ Remove padding from a binary string """
        if not(0<len(paddedBinaryString)):
            # py2/py3-compatible raise (was py2-only "raise X, msg" syntax)
            raise DecryptNotBlockAlignedError('Expected More Data')
        # last byte gives the number of pad bytes to strip
        return paddedBinaryString[:-ord(paddedBinaryString[-1])]
|
||||||
|
class noPadding(Pad):
    """ No padding. Use this to get ECB behavior from encrypt/decrypt """

    def addPad(self, extraBytes, blockSize):
        """ Return the bytes unchanged: no padding is added. """
        return extraBytes

    def removePad(self, paddedBinaryString, blockSize):
        """ Return the string unchanged: there is no padding to remove. """
        return paddedBinaryString
|
|
||||||
|
"""
|
||||||
|
Rijndael encryption algorithm
|
||||||
|
This byte oriented implementation is intended to closely
|
||||||
|
match FIPS specification for readability. It is not implemented
|
||||||
|
for performance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Rijndael(BlockCipher):
    """ Rijndael encryption algorithm

    Byte-oriented reference implementation intended to closely match the
    FIPS-197 specification for readability, not performance. Key and
    block sizes are given in bytes; valid sizes are those in NrTable.
    """
    def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
        self.name       = 'RIJNDAEL'
        self.keySize    = keySize
        self.strength   = keySize*8
        self.blockSize  = blockSize  # blockSize is in bytes
        self.padding    = padding    # change default to noPadding() to get normal ECB behavior

        # '//' (integer division) and 'in' replace Py2-only '/' and
        # has_key() so these checks work on both Python 2 and 3.
        assert( keySize%4==0 and keySize//4 in NrTable[4]),'key size must be 16,20,24,29 or 32 bytes'
        assert( blockSize%4==0 and blockSize//4 in NrTable), 'block size must be 16,20,24,29 or 32 bytes'

        self.Nb = self.blockSize//4  # Nb is number of columns of 32 bit words
        self.Nk = keySize//4         # Nk is the key length in 32-bit words
        self.Nr = NrTable[self.Nb][self.Nk]  # The number of rounds (Nr) is a function of
                                             # the block (Nb) and key (Nk) sizes.
        if key != None:
            self.setKey(key)

    def setKey(self, key):
        """ Set a key and generate the expanded key """
        assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
        self.__expandedKey = keyExpansion(self, key)
        self.reset()                  # BlockCipher.reset()

    def encryptBlock(self, plainTextBlock):
        """ Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
        self.state = self._toBlock(plainTextBlock)
        AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr):          #for round = 1 step 1 to Nr
            SubBytes(self)
            ShiftRows(self)
            MixColumns(self)
            AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
        # final round omits MixColumns per the specification
        SubBytes(self)
        ShiftRows(self)
        AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
        return self._toBString(self.state)

    def decryptBlock(self, encryptedBlock):
        """ decrypt a block (array of bytes) """
        self.state = self._toBlock(encryptedBlock)
        AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
        for round in range(self.Nr-1,0,-1):
            InvShiftRows(self)
            InvSubBytes(self)
            AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
            InvMixColumns(self)
        # final inverse round, mirror of the last forward round
        InvShiftRows(self)
        InvSubBytes(self)
        AddRoundKey(self, self.__expandedKey[0:self.Nb])
        return self._toBString(self.state)

    def _toBlock(self, bs):
        """ Convert binary string to array of bytes, state[col][row]"""
        assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
        return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]

    def _toBString(self, block):
        """ Convert block (array of bytes) to binary string """
        l = []
        for col in block:
            for rowElement in col:
                l.append(chr(rowElement))
        return ''.join(l)
|
||||||
|
#-------------------------------------
|
||||||
|
""" Number of rounds Nr = NrTable[Nb][Nk]
|
||||||
|
|
||||||
|
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
|
||||||
|
------------------------------------- """
|
||||||
|
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
5: {4:11, 5:11, 6:12, 7:13, 8:14},
|
||||||
|
6: {4:12, 5:12, 6:12, 7:13, 8:14},
|
||||||
|
7: {4:13, 5:13, 6:13, 7:13, 8:14},
|
||||||
|
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
|
||||||
|
#-------------------------------------
|
||||||
|
def keyExpansion(algInstance, keyString):
    """ Expand a string of size keySize into a larger array

    Implements the FIPS-197 KeyExpansion routine: returns a list of
    Nb*(Nr+1) four-byte words derived from keyString.
    """
    Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr  # for readability
    key = [ord(byte) for byte in keyString]  # convert string to list
    w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
    for i in range(Nk,Nb*(Nr+1)):
        temp = w[i-1]        # a four byte column
        if (i%Nk) == 0 :
            temp = temp[1:]+[temp[0]]               # RotWord(temp)
            temp = [ Sbox[byte] for byte in temp ]  # SubWord(temp)
            # '//' keeps the Rcon index an int on Python 3
            # (plain '/' yields a float there and breaks indexing)
            temp[0] ^= Rcon[i//Nk]
        elif Nk > 6 and i%Nk == 4 :
            temp = [ Sbox[byte] for byte in temp ]  # SubWord(temp)
        w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
    return w
|
||||||
|
|
||||||
|
# Round-constant table for key expansion; index 0 is an unused
# placeholder so Rcon[i//Nk] can be indexed 1-based.
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
        0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
        0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def AddRoundKey(algInstance, keyBlock):
    """XOR the algorithm state, in place, with a block of key material."""
    for c in range(algInstance.Nb):
        stateCol = algInstance.state[c]
        keyCol = keyBlock[c]
        for r in range(4):
            stateCol[r] ^= keyCol[r]
|
||||||
|
#-------------------------------------
|
||||||
|
|
||||||
|
def SubBytes(algInstance):
    """Apply the forward S-box substitution to every byte of the state."""
    for c in range(algInstance.Nb):
        col = algInstance.state[c]
        for r in range(4):
            col[r] = Sbox[col[r]]
|
||||||
|
|
||||||
|
def InvSubBytes(algInstance):
    """Apply the inverse S-box substitution to every byte of the state."""
    for c in range(algInstance.Nb):
        col = algInstance.state[c]
        for r in range(4):
            col[r] = InvSbox[col[r]]
|
||||||
|
|
||||||
|
# Forward S-box: the 256-entry byte substitution table used by
# SubBytes and key expansion.
Sbox =  (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
         0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
         0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
         0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
         0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
         0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
         0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
         0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
         0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
         0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
         0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
         0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
         0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
         0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
         0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
         0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
         0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
         0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
         0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
         0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
         0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
         0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
         0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
         0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
         0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
         0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
         0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
         0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
         0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
         0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
         0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
         0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
|
||||||
|
|
||||||
|
# Inverse S-box: InvSbox[Sbox[x]] == x; used by InvSubBytes.
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
           0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
           0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
           0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
           0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
           0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
           0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
           0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
           0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
           0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
           0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
           0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
           0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
           0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
           0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
           0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
           0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
           0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
           0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
           0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
           0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
           0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
           0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
           0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
           0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
           0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
           0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
           0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
           0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
           0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
           0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
           0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
""" For each block size (Nb), the ShiftRow operation shifts row i
|
||||||
|
by the amount Ci. Note that row 0 is not shifted.
|
||||||
|
Nb C1 C2 C3
|
||||||
|
------------------- """
|
||||||
|
shiftOffset = { 4 : ( 0, 1, 2, 3),
|
||||||
|
5 : ( 0, 1, 2, 3),
|
||||||
|
6 : ( 0, 1, 2, 3),
|
||||||
|
7 : ( 0, 1, 2, 4),
|
||||||
|
8 : ( 0, 1, 3, 4) }
|
||||||
|
def ShiftRows(algInstance):
    """Cyclically left-rotate each state row by its Nb-dependent offset.

    Row 0 is never shifted; rows 1-3 rotate by shiftOffset[Nb][r].
    """
    Nb = algInstance.Nb
    state = algInstance.state
    offsets = shiftOffset[Nb]
    tmp = [0] * Nb
    for r in range(1, 4):            # row 0 remains unchanged
        off = offsets[r]
        for c in range(Nb):
            tmp[c] = state[(c + off) % Nb][r]
        for c in range(Nb):
            state[c][r] = tmp[c]
|
||||||
|
def InvShiftRows(algInstance):
    """Cyclically right-rotate each state row; inverse of ShiftRows."""
    Nb = algInstance.Nb
    state = algInstance.state
    offsets = shiftOffset[Nb]
    tmp = [0] * Nb
    for r in range(1, 4):            # row 0 remains unchanged
        off = offsets[r]
        for c in range(Nb):
            tmp[c] = state[(c + Nb - off) % Nb][r]
        for c in range(Nb):
            state[c][r] = tmp[c]
|
||||||
|
#-------------------------------------
|
||||||
|
def MixColumns(a):
    """Mix each state column with the fixed GF(2^8) matrix [2 3 1 1 / 1 2 3 1 / ...]."""
    for j in range(a.Nb):                     # for each column
        s0, s1, s2, s3 = a.state[j]           # snapshot inputs before overwriting
        a.state[j][0] = mul(2, s0) ^ mul(3, s1) ^ mul(1, s2) ^ mul(1, s3)
        a.state[j][1] = mul(1, s0) ^ mul(2, s1) ^ mul(3, s2) ^ mul(1, s3)
        a.state[j][2] = mul(1, s0) ^ mul(1, s1) ^ mul(2, s2) ^ mul(3, s3)
        a.state[j][3] = mul(3, s0) ^ mul(1, s1) ^ mul(1, s2) ^ mul(2, s3)
|
||||||
|
|
||||||
|
def InvMixColumns(a):
    """ Mix the four bytes of every column in a linear way
        This is the opposite operation of Mixcolumn """
    for j in range(a.Nb):                     # for each column
        s0, s1, s2, s3 = a.state[j]           # snapshot inputs before overwriting
        a.state[j][0] = mul(0x0E, s0) ^ mul(0x0B, s1) ^ mul(0x0D, s2) ^ mul(0x09, s3)
        a.state[j][1] = mul(0x09, s0) ^ mul(0x0E, s1) ^ mul(0x0B, s2) ^ mul(0x0D, s3)
        a.state[j][2] = mul(0x0D, s0) ^ mul(0x09, s1) ^ mul(0x0E, s2) ^ mul(0x0B, s3)
        a.state[j][3] = mul(0x0B, s0) ^ mul(0x0D, s1) ^ mul(0x09, s2) ^ mul(0x0E, s3)
|
||||||
|
|
||||||
|
#-------------------------------------
|
||||||
|
def mul(a, b):
    """ Multiply two elements of GF(2^m)
        needed for MixColumn and InvMixColumn

    Uses the log/antilog tables; anything times zero is zero.
    """
    if a == 0 or b == 0:
        return 0
    return Alogtable[(Logtable[a] + Logtable[b]) % 255]
|
||||||
|
|
||||||
|
# Discrete-log table over GF(2^8) (generator 3), used by mul().
Logtable = (  0,   0,  25,   1,  50,   2,  26, 198,  75, 199,  27, 104,  51, 238, 223,   3,
            100,   4, 224,  14,  52, 141, 129, 239,  76, 113,   8, 200, 248, 105,  28, 193,
            125, 194,  29, 181, 249, 185,  39, 106,  77, 228, 166, 114, 154, 201,   9, 120,
            101,  47, 138,   5,  33,  15, 225,  36,  18, 240, 130,  69,  53, 147, 218, 142,
            150, 143, 219, 189,  54, 208, 206, 148,  19,  92, 210, 241,  64,  70, 131,  56,
            102, 221, 253,  48, 191,   6, 139,  98, 179,  37, 226, 152,  34, 136, 145,  16,
            126, 110,  72, 195, 163, 182,  30,  66,  58, 107,  40,  84, 250, 133,  61, 186,
             43, 121,  10,  21, 155, 159,  94, 202,  78, 212, 172, 229, 243, 115, 167,  87,
            175,  88, 168,  80, 244, 234, 214, 116,  79, 174, 233, 213, 231, 230, 173, 232,
             44, 215, 117, 122, 235,  22,  11, 245,  89, 203,  95, 176, 156, 169,  81, 160,
            127,  12, 246, 111,  23, 196,  73, 236, 216,  67,  31,  45, 164, 118, 123, 183,
            204, 187,  62,  90, 251,  96, 177, 134,  59,  82, 161, 108, 170,  85,  41, 157,
            151, 178, 135, 144,  97, 190, 220, 252, 188, 149, 207, 205,  55,  63,  91, 209,
             83,  57, 132,  60,  65, 162, 109,  71,  20,  42, 158,  93,  86, 242, 211, 171,
             68,  17, 146, 217,  35,  32,  46, 137, 180, 124, 184,  38, 119, 153, 227, 165,
            103,  74, 237, 222, 197,  49, 254,  24,  13,  99, 140, 128, 192, 247, 112,   7)
|
||||||
|
|
||||||
|
# Antilog (exponentiation) table over GF(2^8): Alogtable[Logtable[x]] == x.
Alogtable= (  1,   3,   5,  15,  17,  51,  85, 255,  26,  46, 114, 150, 161, 248,  19,  53,
             95, 225,  56,  72, 216, 115, 149, 164, 247,   2,   6,  10,  30,  34, 102, 170,
            229,  52,  92, 228,  55,  89, 235,  38, 106, 190, 217, 112, 144, 171, 230,  49,
             83, 245,   4,  12,  20,  60,  68, 204,  79, 209, 104, 184, 211, 110, 178, 205,
             76, 212, 103, 169, 224,  59,  77, 215,  98, 166, 241,   8,  24,  40, 120, 136,
            131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206,  73, 219, 118, 154,
            181, 196,  87, 249,  16,  48,  80, 240,  11,  29,  39, 105, 187, 214,  97, 163,
            254,  25,  43, 125, 135, 146, 173, 236,  47, 113, 147, 174, 233,  32,  96, 160,
            251,  22,  58,  78, 210, 109, 183, 194,  93, 231,  50,  86, 250,  21,  63,  65,
            195,  94, 226,  61,  71, 201,  64, 192,  91, 237,  44, 116, 156, 191, 218, 117,
            159, 186, 213, 100, 172, 239,  42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
            155, 182, 193,  88, 232,  35, 101, 175, 234,  37, 111, 177, 200,  67, 197,  84,
            252,  31,  33,  99, 165, 244,   7,   9,  27,  45, 119, 153, 176, 203,  70, 202,
             69, 207,  74, 222, 121, 139, 134, 145, 168, 227,  62,  66, 198,  81, 243,  14,
             18,  54,  90, 238,  41, 123, 141, 140, 143, 138, 133, 148, 167, 242,  13,  23,
             57,  75, 221, 124, 132, 151, 162, 253,  28,  36, 108, 180, 199,  82, 246,   1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES Encryption Algorithm
|
||||||
|
The AES algorithm is just Rijndael algorithm restricted to the default
|
||||||
|
blockSize of 128 bits.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES(Rijndael):
    """ The AES algorithm is the Rijndael block cipher restricted to block
        sizes of 128 bits and key sizes of 128, 192 or 256 bits
    """
    def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
        """ Initialize AES, keySize is in bytes """
        if not (keySize == 16 or keySize == 24 or keySize == 32) :
            # call-style raise: valid on both Python 2 and 3
            # (was Py2-only 'raise X, msg' syntax)
            raise BadKeySizeError('Illegal AES key size, must be 16, 24, or 32 bytes')

        Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )

        self.name = 'AES'
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CBC mode of encryption for block ciphers.
|
||||||
|
This algorithm mode wraps any BlockCipher to make a
|
||||||
|
Cipher Block Chaining mode.
|
||||||
|
"""
|
||||||
|
from random import Random # should change to crypto.random!!!
|
||||||
|
|
||||||
|
|
||||||
|
class CBC(BlockCipher):
    """ The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
        algorithms.  The initialization (IV) is automatic if set to None.  Padding
        is also automatic based on the Pad class used to initialize the algorithm
    """
    def __init__(self, blockCipherInstance, padding = padWithPadLen()):
        """ CBC algorithms are created by initializing with a BlockCipher instance """
        self.baseCipher = blockCipherInstance
        self.name       = self.baseCipher.name + '_CBC'
        self.blockSize  = self.baseCipher.blockSize
        self.keySize    = self.baseCipher.keySize
        self.padding    = padding
        self.baseCipher.padding = noPadding()   # baseCipher should NOT pad!!
        # NOTE(review): IVs are drawn from random.Random seeded with
        # ctime + repr(self) — NOT cryptographically strong. Acceptable
        # only because this code is used for decryption.
        self.r          = Random()            # for IV generation, currently uses
                                              #   mediocre standard distro version     <----------------
        import time
        newSeed = time.ctime()+str(self.r)    # seed with instance location
        self.r.seed(newSeed)                  # to make unique
        self.reset()

    def setKey(self, key):
        # delegate key handling entirely to the wrapped cipher
        self.baseCipher.setKey(key)

    # Overload to reset both CBC state and the wrapped baseCipher
    def resetEncrypt(self):
        BlockCipher.resetEncrypt(self)  # reset CBC encrypt state (super class)
        self.baseCipher.resetEncrypt()  # reset base cipher encrypt state

    def resetDecrypt(self):
        BlockCipher.resetDecrypt(self)  # reset CBC state (super class)
        self.baseCipher.resetDecrypt()  # reset base cipher decrypt state

    def encrypt(self, plainText, iv=None, more=None):
        """ CBC encryption - overloads baseCipher to allow optional explicit IV
            when iv=None, iv is auto generated!
        """
        if self.encryptBlockCount == 0:
            self.iv = iv
        else:
            # streaming continuation ('more' calls) must not re-supply an IV
            assert(iv==None), 'IV used only on first call to encrypt'

        return BlockCipher.encrypt(self,plainText, more=more)

    def decrypt(self, cipherText, iv=None, more=None):
        """ CBC decryption - overloads baseCipher to allow optional explicit IV
            when iv=None, iv is auto generated!
        """
        if self.decryptBlockCount == 0:
            self.iv = iv
        else:
            assert(iv==None), 'IV used only on first call to decrypt'

        return BlockCipher.decrypt(self, cipherText, more=more)

    def encryptBlock(self, plainTextBlock):
        """ CBC block encryption, IV is set with 'encrypt' """
        auto_IV = ''
        if self.encryptBlockCount == 0:
            if self.iv == None:
                # generate IV and use
                self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
                self.prior_encr_CT_block = self.iv
                auto_IV = self.prior_encr_CT_block    # prepend IV if it's automatic
            else:                   # application provided IV
                assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
                self.prior_encr_CT_block = self.iv
        """ encrypt the prior CT XORed with the PT """
        ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
        self.prior_encr_CT_block = ct
        return auto_IV+ct

    def decryptBlock(self, encryptedBlock):
        """ Decrypt a single block """

        if self.decryptBlockCount == 0:   # first call, process IV
            if self.iv == None:    # auto decrypt IV?
                # first ciphertext block IS the IV; consume it, emit nothing
                self.prior_CT_block = encryptedBlock
                return ''
            else:
                assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
                self.prior_CT_block = self.iv

        dct = self.baseCipher.decryptBlock(encryptedBlock)
        """ XOR the prior decrypted CT with the prior CT """
        dct_XOR_priorCT = xor( self.prior_CT_block, dct )

        self.prior_CT_block = encryptedBlock

        return dct_XOR_priorCT
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
AES_CBC Encryption Algorithm
|
||||||
|
"""
|
||||||
|
|
||||||
|
class AES_CBC(CBC):
    """ AES encryption in CBC feedback mode """
    def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
        # Wrap an unpadded AES instance; the CBC layer applies padding itself.
        baseCipher = AES(key, noPadding(), keySize)
        CBC.__init__(self, baseCipher, padding)
        self.name = 'AES_CBC'
|
||||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,290 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
|
||||||
|
import sys, os
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines libalfcrypto
|
||||||
|
def _load_libalfcrypto():
    """Load the native alfcrypto shared library via ctypes and return
    (AES_CBC, Pukall_Cipher, Topaz_Cipher) classes wrapping it.

    Raises if the platform-appropriate library file is not found next
    to sys.path[0].
    """
    import ctypes
    from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
        Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof

    pointer_size = ctypes.sizeof(ctypes.c_voidp)
    name_of_lib = None
    # pick the library name by platform and pointer width
    if sys.platform.startswith('darwin'):
        name_of_lib = 'libalfcrypto.dylib'
    elif sys.platform.startswith('win'):
        if pointer_size == 4:
            name_of_lib = 'alfcrypto.dll'
        else:
            name_of_lib = 'alfcrypto64.dll'
    else:
        if pointer_size == 4:
            name_of_lib = 'libalfcrypto32.so'
        else:
            name_of_lib = 'libalfcrypto64.so'

    libalfcrypto = sys.path[0] + os.sep + name_of_lib

    if not os.path.isfile(libalfcrypto):
        raise Exception('libalfcrypto not found')

    libalfcrypto = CDLL(libalfcrypto)

    c_char_pp = POINTER(c_char_p)
    c_int_p = POINTER(c_int)

    def F(restype, name, argtypes):
        # bind a library symbol with its C prototype
        func = getattr(libalfcrypto, name)
        func.restype = restype
        func.argtypes = argtypes
        return func

    # aes cbc decryption
    #
    # struct aes_key_st {
    #    unsigned long rd_key[4 *(AES_MAXNR + 1)];
    #    int rounds;
    # };
    # typedef struct aes_key_st AES_KEY;
    #
    # int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
    # void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
    #     const unsigned long length, const AES_KEY *key,
    #     unsigned char *ivec, const int enc);

    AES_MAXNR = 14

    class AES_KEY(Structure):
        _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]

    AES_KEY_p = POINTER(AES_KEY)
    AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
    AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])

    # Pukall 1 Cipher
    # unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
    #                    unsigned char *dest, unsigned int len, int decryption);
    PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])

    # Topaz Encryption
    # typedef struct _TpzCtx { unsigned int v[2]; } TpzCtx;
    # void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
    # void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);

    class TPZ_CTX(Structure):
        _fields_ = [('v', c_long * 2)]

    TPZ_CTX_p = POINTER(TPZ_CTX)
    topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
    topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])

    class AES_CBC(object):
        """AES-CBC decryption backed by the native library."""
        def __init__(self):
            self._blocksize = 0
            self._keyctx = None
            self._iv = 0

        def set_decrypt_key(self, userkey, iv):
            self._blocksize = len(userkey)
            if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
                # (removed an unreachable 'return' that followed this raise)
                raise Exception('AES CBC improper key used')
            keyctx = self._keyctx = AES_KEY()
            self._iv = iv
            rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
            if rv < 0:
                raise Exception('Failed to initialize AES CBC key')

        def decrypt(self, data):
            out = create_string_buffer(len(data))
            # the C routine mutates the IV buffer, so hand it a copy
            mutable_iv = create_string_buffer(self._iv, len(self._iv))
            rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
            # NOTE(review): AES_cbc_encrypt is declared with restype None,
            # so rv is always None and this check can never fire.
            if rv == 0:
                raise Exception('AES CBC decryption failed')
            return out.raw

    class Pukall_Cipher(object):
        """Pukall PC1 stream cipher backed by the native library."""
        def __init__(self):
            self.key = None

        def PC1(self, key, src, decryption=True):
            self.key = key
            out = create_string_buffer(len(src))
            de = 0
            if decryption:
                de = 1
            # calls the library-level PC1 from the enclosing scope
            rv = PC1(key, len(key), src, out, len(src), de)
            return out.raw

    class Topaz_Cipher(object):
        """Topaz stream cipher backed by the native library."""
        def __init__(self):
            self._ctx = None

        def ctx_init(self, key):
            tpz_ctx = self._ctx = TPZ_CTX()
            topazCryptoInit(tpz_ctx, key, len(key))
            return tpz_ctx

        def decrypt(self, data, ctx=None):
            if ctx == None:
                ctx = self._ctx
            out = create_string_buffer(len(data))
            topazCryptoDecrypt(ctx, data, out, len(data))
            return out.raw

    # print() call form parses on both Python 2 and 3
    # (was a Py2-only print statement)
    print("Using Library AlfCrypto DLL/DYLIB/SO")
    return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_python_alfcrypto():
    """Return (AES_CBC, Pukall_Cipher, Topaz_Cipher) pure-python
    fallback implementations (used when the native library is absent)."""

    import aescbc

    class Pukall_Cipher(object):
        """Pukall PC1 stream cipher, pure python."""
        def __init__(self):
            self.key = None

        def PC1(self, key, src, decryption=True):
            """En/decrypt src with a 16-byte key; returns None on bad key length."""
            sum1 = 0
            sum2 = 0
            keyXorVal = 0
            if len(key) != 16:
                # print() call form parses on both Python 2 and 3
                print("Bad key length!")
                return None
            # expand the key into eight 16-bit words
            wkey = []
            for i in range(8):
                wkey.append(ord(key[i*2]) << 8 | ord(key[i*2+1]))
            dst = ""
            for i in range(len(src)):
                temp1 = 0
                byteXorVal = 0
                for j in range(8):
                    temp1 ^= wkey[j]
                    sum2 = (sum2+j)*20021 + sum1
                    sum1 = (temp1*346) & 0xFFFF
                    sum2 = (sum2+sum1) & 0xFFFF
                    temp1 = (temp1*20021+1) & 0xFFFF
                    byteXorVal ^= temp1 ^ sum2
                curByte = ord(src[i])
                # key feedback uses plaintext byte, so it is taken before
                # the XOR when encrypting and after when decrypting
                if not decryption:
                    keyXorVal = curByte * 257
                curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
                if decryption:
                    keyXorVal = curByte * 257
                for j in range(8):
                    wkey[j] ^= keyXorVal
                dst += chr(curByte)
            return dst

    class Topaz_Cipher(object):
        """Topaz stream cipher, pure python."""
        def __init__(self):
            self._ctx = None

        def ctx_init(self, key):
            """Derive the two-word cipher context from the key string."""
            # NOTE(review): an empty key would leave ctx2 unbound — presumably
            # callers always pass a non-empty key; verify at call sites.
            ctx1 = 0x0CAFFE19E
            for keyChar in key:
                keyByte = ord(keyChar)
                ctx2 = ctx1
                ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
            self._ctx = [ctx1, ctx2]
            return [ctx1,ctx2]

        def decrypt(self, data, ctx=None):
            if ctx == None:
                ctx = self._ctx
            ctx1 = ctx[0]
            ctx2 = ctx[1]
            plainText = ""
            for dataChar in data:
                dataByte = ord(dataChar)
                m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
                ctx2 = ctx1
                ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
                plainText += chr(m)
            return plainText

    class AES_CBC(object):
        """AES-CBC decryption built on the pure-python aescbc module."""
        def __init__(self):
            self._key = None
            self._iv = None
            self.aes = None

        def set_decrypt_key(self, userkey, iv):
            self._key = userkey
            self._iv = iv
            self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))

        def decrypt(self, data):
            # prepend the IV so the CBC layer consumes it as the first block
            iv = self._iv
            cleartext = self.aes.decrypt(iv + data)
            return cleartext

    return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto():
    """Resolve cipher implementations: native library first, then the
    pure-python fallback; returns (AES_CBC, Pukall_Cipher, Topaz_Cipher),
    possibly (None, None, None) if every loader failed."""
    AES_CBC = Pukall_Cipher = Topaz_Cipher = None
    cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
    for loader in cryptolist:
        try:
            AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
            break
        except Exception:
            # 'except (ImportError, Exception)' was redundant —
            # ImportError is already an Exception subclass.
            pass
    return AES_CBC, Pukall_Cipher, Topaz_Cipher
|
||||||
|
|
||||||
|
# Resolve the cipher implementations once at import time
# (native library preferred, pure-python fallback otherwise).
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyIVGen(object):
    """Derives key/IV material from a passphrase using PBKDF2-HMAC-SHA1."""
    # this only exists in openssl so we will use pure python implementation instead
    # PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
    #                [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
    def pbkdf2(self, passwd, salt, iter, keylen):
        """RFC 2898 PBKDF2 with HMAC-SHA1; returns keylen bytes.

        Rewritten around bytearrays so the byte-wise XOR works on both
        Python 2 (str digests) and Python 3 (bytes digests); the old
        chr/ord version broke on Python 3, and 'keylen / digest_size'
        produced a float there.
        """

        def prf(h, data):
            # HMAC over data using a copy of the key-primed context
            hm = h.copy()
            hm.update(data)
            return hm.digest()

        def pbkdf2_F(h, salt, itercount, blocknum):
            # T = U_1 ^ U_2 ^ ... ^ U_c,  U_1 = PRF(salt || INT(blocknum))
            U = bytearray(prf(h, salt + pack('>i', blocknum)))
            T = bytearray(U)
            for _ in range(2, itercount + 1):
                U = bytearray(prf(h, bytes(U)))
                for idx in range(len(T)):
                    T[idx] ^= U[idx]
            return T

        sha = hashlib.sha1
        digest_size = sha().digest_size
        # number of output blocks to produce, rounded up (integer math)
        l = (keylen + digest_size - 1) // digest_size
        h = hmac.new(passwd, None, sha)
        T = bytearray()
        for i in range(1, l + 1):
            T += pbkdf2_F(h, salt, iter, i)
        return bytes(T[0:keylen])
|
||||||
|
|
||||||
|
|
||||||
Binary file not shown.
Binary file not shown.
@@ -20,6 +20,8 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
    """Raised when Topaz DRM processing fails."""
    pass
|
||||||
|
|
||||||
# Get a 7 bit encoded number from string. The most
|
# Get a 7 bit encoded number from string. The most
|
||||||
# significant byte comes first and has the high bit (8th) set
|
# significant byte comes first and has the high bit (8th) set
|
||||||
@@ -32,11 +34,11 @@ def readEncodedNumber(file):
|
|||||||
data = ord(c)
|
data = ord(c)
|
||||||
|
|
||||||
if data == 0xFF:
|
if data == 0xFF:
|
||||||
flag = True
|
flag = True
|
||||||
c = file.read(1)
|
c = file.read(1)
|
||||||
if (len(c) == 0):
|
if (len(c) == 0):
|
||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
|
|
||||||
if data >= 0x80:
|
if data >= 0x80:
|
||||||
datax = (data & 0x7F)
|
datax = (data & 0x7F)
|
||||||
@@ -49,7 +51,7 @@ def readEncodedNumber(file):
|
|||||||
data = datax
|
data = datax
|
||||||
|
|
||||||
if flag:
|
if flag:
|
||||||
data = -data
|
data = -data
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
@@ -57,29 +59,29 @@ def readEncodedNumber(file):
|
|||||||
# most significant byte first which has the high bit set
|
# most significant byte first which has the high bit set
|
||||||
|
|
||||||
def encodeNumber(number):
|
def encodeNumber(number):
|
||||||
result = ""
|
result = ""
|
||||||
negative = False
|
negative = False
|
||||||
flag = 0
|
flag = 0
|
||||||
|
|
||||||
if number < 0 :
|
if number < 0 :
|
||||||
number = -number + 1
|
number = -number + 1
|
||||||
negative = True
|
negative = True
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
byte = number & 0x7F
|
byte = number & 0x7F
|
||||||
number = number >> 7
|
number = number >> 7
|
||||||
byte += flag
|
byte += flag
|
||||||
result += chr(byte)
|
result += chr(byte)
|
||||||
flag = 0x80
|
flag = 0x80
|
||||||
if number == 0 :
|
if number == 0 :
|
||||||
if (byte == 0xFF and negative == False) :
|
if (byte == 0xFF and negative == False) :
|
||||||
result += chr(0x80)
|
result += chr(0x80)
|
||||||
break
|
break
|
||||||
|
|
||||||
if negative:
|
if negative:
|
||||||
result += chr(0xFF)
|
result += chr(0xFF)
|
||||||
|
|
||||||
return result[::-1]
|
return result[::-1]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -138,7 +140,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside of string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
|
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
@@ -235,6 +238,7 @@ class PageParser(object):
|
|||||||
|
|
||||||
'group' : (1, 'snippets', 1, 0),
|
'group' : (1, 'snippets', 1, 0),
|
||||||
'group.type' : (1, 'scalar_text', 0, 0),
|
'group.type' : (1, 'scalar_text', 0, 0),
|
||||||
|
'group._tag' : (1, 'scalar_text', 0, 0),
|
||||||
|
|
||||||
'region' : (1, 'snippets', 1, 0),
|
'region' : (1, 'snippets', 1, 0),
|
||||||
'region.type' : (1, 'scalar_text', 0, 0),
|
'region.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -257,6 +261,13 @@ class PageParser(object):
|
|||||||
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
'paragraph.class' : (1, 'scalar_text', 0, 0),
|
||||||
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
|
|
||||||
'word_semantic' : (1, 'snippets', 1, 1),
|
'word_semantic' : (1, 'snippets', 1, 1),
|
||||||
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
'word_semantic.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -271,11 +282,21 @@ class PageParser(object):
|
|||||||
|
|
||||||
'_span' : (1, 'snippets', 1, 0),
|
'_span' : (1, 'snippets', 1, 0),
|
||||||
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
'_span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'-span.lastWord' : (1, 'scalar_number', 0, 0),
|
'_span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'span' : (1, 'snippets', 1, 0),
|
'span' : (1, 'snippets', 1, 0),
|
||||||
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
'span.firstWord' : (1, 'scalar_number', 0, 0),
|
||||||
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
'span.lastWord' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridSize' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
|
||||||
|
|
||||||
'extratokens' : (1, 'snippets', 1, 0),
|
'extratokens' : (1, 'snippets', 1, 0),
|
||||||
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
'extratokens.type' : (1, 'scalar_text', 0, 0),
|
||||||
@@ -594,65 +615,70 @@ class PageParser(object):
|
|||||||
nodename = fullpathname.pop()
|
nodename = fullpathname.pop()
|
||||||
ilvl = len(fullpathname)
|
ilvl = len(fullpathname)
|
||||||
indent = ' ' * (3 * ilvl)
|
indent = ' ' * (3 * ilvl)
|
||||||
result = indent + '<' + nodename + '>'
|
rlst = []
|
||||||
|
rlst.append(indent + '<' + nodename + '>')
|
||||||
if len(argList) > 0:
|
if len(argList) > 0:
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + ','
|
alst.append(str(j) + ',')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += 'snippets:' + argres
|
rlst.append('snippets:' + argres)
|
||||||
else :
|
else :
|
||||||
result += argres
|
rlst.append(argres)
|
||||||
if len(subtagList) > 0 :
|
if len(subtagList) > 0 :
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
result += indent + '</' + nodename + '>\n'
|
rlst.append(indent + '</' + nodename + '>\n')
|
||||||
else:
|
else:
|
||||||
result += '</' + nodename + '>\n'
|
rlst.append('</' + nodename + '>\n')
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# flatten tag
|
# flatten tag
|
||||||
def flattenTag(self, node):
|
def flattenTag(self, node):
|
||||||
name = node[0]
|
name = node[0]
|
||||||
subtagList = node[1]
|
subtagList = node[1]
|
||||||
argtype = node[2]
|
argtype = node[2]
|
||||||
argList = node[3]
|
argList = node[3]
|
||||||
result = name
|
rlst = []
|
||||||
|
rlst.append(name)
|
||||||
if (len(argList) > 0):
|
if (len(argList) > 0):
|
||||||
argres = ''
|
alst = []
|
||||||
for j in argList:
|
for j in argList:
|
||||||
if (argtype == 'text') or (argtype == 'scalar_text') :
|
if (argtype == 'text') or (argtype == 'scalar_text') :
|
||||||
argres += j + '|'
|
alst.append(j + '|')
|
||||||
else :
|
else :
|
||||||
argres += str(j) + '|'
|
alst.append(str(j) + '|')
|
||||||
|
argres = "".join(alst)
|
||||||
argres = argres[0:-1]
|
argres = argres[0:-1]
|
||||||
if argtype == 'snippets' :
|
if argtype == 'snippets' :
|
||||||
result += '.snippets=' + argres
|
rlst.append('.snippets=' + argres)
|
||||||
else :
|
else :
|
||||||
result += '=' + argres
|
rlst.append('=' + argres)
|
||||||
result += '\n'
|
rlst.append('\n')
|
||||||
for j in subtagList:
|
for j in subtagList:
|
||||||
if len(j) > 0 :
|
if len(j) > 0 :
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
return result
|
return "".join(rlst)
|
||||||
|
|
||||||
|
|
||||||
# reduce create xml output
|
# reduce create xml output
|
||||||
def formatDoc(self, flat_xml):
|
def formatDoc(self, flat_xml):
|
||||||
result = ''
|
rlst = []
|
||||||
for j in self.doc :
|
for j in self.doc :
|
||||||
if len(j) > 0:
|
if len(j) > 0:
|
||||||
if flat_xml:
|
if flat_xml:
|
||||||
result += self.flattenTag(j)
|
rlst.append(self.flattenTag(j))
|
||||||
else:
|
else:
|
||||||
result += self.formatTag(j)
|
rlst.append(self.formatTag(j))
|
||||||
|
result = "".join(rlst)
|
||||||
if self.debug : print result
|
if self.debug : print result
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@@ -0,0 +1,88 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
import os
|
||||||
|
|
||||||
|
import ineptepub
|
||||||
|
import ignobleepub
|
||||||
|
import zipfix
|
||||||
|
import re
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
args = argv[1:]
|
||||||
|
if len(args) != 3:
|
||||||
|
return -1
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
rscpath = args[2]
|
||||||
|
errlog = ''
|
||||||
|
|
||||||
|
# first fix the epub to make sure we do not get errors
|
||||||
|
name, ext = os.path.splitext(os.path.basename(infile))
|
||||||
|
bpath = os.path.dirname(infile)
|
||||||
|
zippath = os.path.join(bpath,name + '_temp.zip')
|
||||||
|
rv = zipfix.repairBook(infile, zippath)
|
||||||
|
if rv != 0:
|
||||||
|
print "Error while trying to fix epub"
|
||||||
|
return rv
|
||||||
|
|
||||||
|
# determine a good name for the output file
|
||||||
|
outfile = os.path.join(outdir, name + '_nodrm.epub')
|
||||||
|
|
||||||
|
rv = 1
|
||||||
|
# first try with the Adobe adept epub
|
||||||
|
# try with any keyfiles (*.der) in the rscpath
|
||||||
|
files = os.listdir(rscpath)
|
||||||
|
filefilter = re.compile("\.der$", re.IGNORECASE)
|
||||||
|
files = filter(filefilter.search, files)
|
||||||
|
if files:
|
||||||
|
for filename in files:
|
||||||
|
keypath = os.path.join(rscpath, filename)
|
||||||
|
try:
|
||||||
|
rv = ineptepub.decryptBook(keypath, zippath, outfile)
|
||||||
|
if rv == 0:
|
||||||
|
break
|
||||||
|
except Exception, e:
|
||||||
|
errlog += str(e)
|
||||||
|
rv = 1
|
||||||
|
pass
|
||||||
|
if rv == 0:
|
||||||
|
os.remove(zippath)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# still no luck
|
||||||
|
# now try with ignoble epub
|
||||||
|
# try with any keyfiles (*.b64) in the rscpath
|
||||||
|
files = os.listdir(rscpath)
|
||||||
|
filefilter = re.compile("\.b64$", re.IGNORECASE)
|
||||||
|
files = filter(filefilter.search, files)
|
||||||
|
if files:
|
||||||
|
for filename in files:
|
||||||
|
keypath = os.path.join(rscpath, filename)
|
||||||
|
try:
|
||||||
|
rv = ignobleepub.decryptBook(keypath, zippath, outfile)
|
||||||
|
if rv == 0:
|
||||||
|
break
|
||||||
|
except Exception, e:
|
||||||
|
errlog += str(e)
|
||||||
|
rv = 1
|
||||||
|
pass
|
||||||
|
os.remove(zippath)
|
||||||
|
if rv != 0:
|
||||||
|
print errlog
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
||||||
@@ -0,0 +1,45 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
import os
|
||||||
|
|
||||||
|
import erdr2pml
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
args = argv[1:]
|
||||||
|
if len(args) != 3:
|
||||||
|
return -1
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
rscpath = args[2]
|
||||||
|
rv = 1
|
||||||
|
socialpath = os.path.join(rscpath,'sdrmlist.txt')
|
||||||
|
if os.path.exists(socialpath):
|
||||||
|
keydata = file(socialpath,'r').read()
|
||||||
|
keydata = keydata.rstrip(os.linesep)
|
||||||
|
ar = keydata.split(',')
|
||||||
|
for i in ar:
|
||||||
|
try:
|
||||||
|
name, cc8 = i.split(':')
|
||||||
|
except ValueError:
|
||||||
|
print ' Error parsing user supplied social drm data.'
|
||||||
|
return 1
|
||||||
|
rv = erdr2pml.decryptBook(infile, outdir, name, cc8, True)
|
||||||
|
if rv == 0:
|
||||||
|
break
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
class Unbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
def write(self, data):
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import ineptpdf
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
args = argv[1:]
|
||||||
|
if len(args) != 3:
|
||||||
|
return -1
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
rscpath = args[2]
|
||||||
|
errlog = ''
|
||||||
|
rv = 1
|
||||||
|
|
||||||
|
# determine a good name for the output file
|
||||||
|
name, ext = os.path.splitext(os.path.basename(infile))
|
||||||
|
outfile = os.path.join(outdir, name + '_nodrm.pdf')
|
||||||
|
|
||||||
|
# try with any keyfiles (*.der) in the rscpath
|
||||||
|
files = os.listdir(rscpath)
|
||||||
|
filefilter = re.compile("\.der$", re.IGNORECASE)
|
||||||
|
files = filter(filefilter.search, files)
|
||||||
|
if files:
|
||||||
|
for filename in files:
|
||||||
|
keypath = os.path.join(rscpath, filename)
|
||||||
|
try:
|
||||||
|
rv = ineptpdf.decryptBook(keypath, infile, outfile)
|
||||||
|
if rv == 0:
|
||||||
|
break
|
||||||
|
except Exception, e:
|
||||||
|
errlog += str(e)
|
||||||
|
rv = 1
|
||||||
|
pass
|
||||||
|
if rv != 0:
|
||||||
|
print errlog
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
sys.exit(main())
|
||||||
@@ -57,8 +57,13 @@
|
|||||||
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
# 0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
|
||||||
# 0.17 - added support for pycrypto's DES as well
|
# 0.17 - added support for pycrypto's DES as well
|
||||||
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
# 0.18 - on Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 0.19 - Modify the interface to allow use of import
|
||||||
|
# 0.20 - modify to allow use inside new interface for calibre plugins
|
||||||
|
# 0.21 - Support eReader (drm) version 11.
|
||||||
|
# - Don't reject dictionary format.
|
||||||
|
# - Ignore sidebars for dictionaries (different format?)
|
||||||
|
|
||||||
__version__='0.18'
|
__version__='0.21'
|
||||||
|
|
||||||
class Unbuffered:
|
class Unbuffered:
|
||||||
def __init__(self, stream):
|
def __init__(self, stream):
|
||||||
@@ -70,32 +75,50 @@ class Unbuffered:
|
|||||||
return getattr(self.stream, attr)
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
sys.stdout=Unbuffered(sys.stdout)
|
|
||||||
|
|
||||||
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
Des = None
|
Des = None
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
# first try with pycrypto
|
# first try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# they try with openssl
|
# they try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
else:
|
else:
|
||||||
# first try with openssl
|
# first try with openssl
|
||||||
import openssl_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import openssl_des
|
||||||
|
else:
|
||||||
|
import openssl_des
|
||||||
Des = openssl_des.load_libcrypto()
|
Des = openssl_des.load_libcrypto()
|
||||||
if Des == None:
|
if Des == None:
|
||||||
# then try with pycrypto
|
# then try with pycrypto
|
||||||
import pycrypto_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import pycrypto_des
|
||||||
|
else:
|
||||||
|
import pycrypto_des
|
||||||
Des = pycrypto_des.load_pycrypto()
|
Des = pycrypto_des.load_pycrypto()
|
||||||
|
|
||||||
# if that did not work then use pure python implementation
|
# if that did not work then use pure python implementation
|
||||||
# of DES and try to speed it up with Psycho
|
# of DES and try to speed it up with Psycho
|
||||||
if Des == None:
|
if Des == None:
|
||||||
import python_des
|
if inCalibre:
|
||||||
|
from calibre_plugins.erdrpdb2pml import python_des
|
||||||
|
else:
|
||||||
|
import python_des
|
||||||
Des = python_des.Des
|
Des = python_des.Des
|
||||||
# Import Psyco if available
|
# Import Psyco if available
|
||||||
try:
|
try:
|
||||||
@@ -111,19 +134,27 @@ except ImportError:
|
|||||||
# older Python release
|
# older Python release
|
||||||
import sha
|
import sha
|
||||||
sha1 = lambda s: sha.new(s)
|
sha1 = lambda s: sha.new(s)
|
||||||
|
|
||||||
import cgi
|
import cgi
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logging.basicConfig()
|
logging.basicConfig()
|
||||||
#logging.basicConfig(level=logging.DEBUG)
|
#logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
class Sectionizer(object):
|
class Sectionizer(object):
|
||||||
|
bkType = "Book"
|
||||||
|
|
||||||
def __init__(self, filename, ident):
|
def __init__(self, filename, ident):
|
||||||
self.contents = file(filename, 'rb').read()
|
self.contents = file(filename, 'rb').read()
|
||||||
self.header = self.contents[0:72]
|
self.header = self.contents[0:72]
|
||||||
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
self.num_sections, = struct.unpack('>H', self.contents[76:78])
|
||||||
|
# Dictionary or normal content (TODO: Not hard-coded)
|
||||||
if self.header[0x3C:0x3C+8] != ident:
|
if self.header[0x3C:0x3C+8] != ident:
|
||||||
raise ValueError('Invalid file format')
|
if self.header[0x3C:0x3C+8] == "PDctPPrs":
|
||||||
|
self.bkType = "Dict"
|
||||||
|
else:
|
||||||
|
raise ValueError('Invalid file format')
|
||||||
self.sections = []
|
self.sections = []
|
||||||
for i in xrange(self.num_sections):
|
for i in xrange(self.num_sections):
|
||||||
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
|
||||||
@@ -147,7 +178,7 @@ def sanitizeFileName(s):
|
|||||||
def fixKey(key):
|
def fixKey(key):
|
||||||
def fixByte(b):
|
def fixByte(b):
|
||||||
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
|
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
|
||||||
return "".join([chr(fixByte(ord(a))) for a in key])
|
return "".join([chr(fixByte(ord(a))) for a in key])
|
||||||
|
|
||||||
def deXOR(text, sp, table):
|
def deXOR(text, sp, table):
|
||||||
r=''
|
r=''
|
||||||
@@ -160,15 +191,15 @@ def deXOR(text, sp, table):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
class EreaderProcessor(object):
|
class EreaderProcessor(object):
|
||||||
def __init__(self, section_reader, username, creditcard):
|
def __init__(self, sect, username, creditcard):
|
||||||
self.section_reader = section_reader
|
self.section_reader = sect.loadSection
|
||||||
data = section_reader(0)
|
data = self.section_reader(0)
|
||||||
version, = struct.unpack('>H', data[0:2])
|
version, = struct.unpack('>H', data[0:2])
|
||||||
self.version = version
|
self.version = version
|
||||||
logging.info('eReader file format version %s', version)
|
logging.info('eReader file format version %s', version)
|
||||||
if version != 272 and version != 260 and version != 259:
|
if version != 272 and version != 260 and version != 259:
|
||||||
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
raise ValueError('incorrect eReader version %d (error 1)' % version)
|
||||||
data = section_reader(1)
|
data = self.section_reader(1)
|
||||||
self.data = data
|
self.data = data
|
||||||
des = Des(fixKey(data[0:8]))
|
des = Des(fixKey(data[0:8]))
|
||||||
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
|
||||||
@@ -181,7 +212,7 @@ class EreaderProcessor(object):
|
|||||||
for i in xrange(len(data)):
|
for i in xrange(len(data)):
|
||||||
j = (j + shuf) % len(data)
|
j = (j + shuf) % len(data)
|
||||||
r[j] = data[i]
|
r[j] = data[i]
|
||||||
assert len("".join(r)) == len(data)
|
assert len("".join(r)) == len(data)
|
||||||
return "".join(r)
|
return "".join(r)
|
||||||
r = unshuff(input[0:-8], cookie_shuf)
|
r = unshuff(input[0:-8], cookie_shuf)
|
||||||
|
|
||||||
@@ -197,11 +228,17 @@ class EreaderProcessor(object):
|
|||||||
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
|
||||||
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
|
||||||
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
|
||||||
|
# Default values
|
||||||
|
self.num_footnote_pages = 0
|
||||||
|
self.num_sidebar_pages = 0
|
||||||
|
self.first_footnote_page = -1
|
||||||
|
self.first_sidebar_page = -1
|
||||||
if self.version == 272:
|
if self.version == 272:
|
||||||
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
|
||||||
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
|
||||||
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
if (sect.bkType == "Book"):
|
||||||
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
|
||||||
|
self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
|
||||||
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
# self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
|
||||||
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
# self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
|
||||||
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
# self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
|
||||||
@@ -217,10 +254,8 @@ class EreaderProcessor(object):
|
|||||||
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
|
||||||
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
|
||||||
else:
|
else:
|
||||||
self.num_footnote_pages = 0
|
# Nothing needs to be done
|
||||||
self.num_sidebar_pages = 0
|
pass
|
||||||
self.first_footnote_page = -1
|
|
||||||
self.first_sidebar_page = -1
|
|
||||||
# self.num_bookinfo_pages = 0
|
# self.num_bookinfo_pages = 0
|
||||||
# self.num_chapter_pages = 0
|
# self.num_chapter_pages = 0
|
||||||
# self.num_link_pages = 0
|
# self.num_link_pages = 0
|
||||||
@@ -245,10 +280,14 @@ class EreaderProcessor(object):
|
|||||||
encrypted_key_sha = r[44:44+20]
|
encrypted_key_sha = r[44:44+20]
|
||||||
encrypted_key = r[64:64+8]
|
encrypted_key = r[64:64+8]
|
||||||
elif version == 260:
|
elif version == 260:
|
||||||
if drm_sub_version != 13:
|
if drm_sub_version != 13 and drm_sub_version != 11:
|
||||||
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
|
||||||
encrypted_key = r[44:44+8]
|
if drm_sub_version == 13:
|
||||||
encrypted_key_sha = r[52:52+20]
|
encrypted_key = r[44:44+8]
|
||||||
|
encrypted_key_sha = r[52:52+20]
|
||||||
|
else:
|
||||||
|
encrypted_key = r[64:64+8]
|
||||||
|
encrypted_key_sha = r[44:44+20]
|
||||||
elif version == 272:
|
elif version == 272:
|
||||||
encrypted_key = r[172:172+8]
|
encrypted_key = r[172:172+8]
|
||||||
encrypted_key_sha = r[56:56+20]
|
encrypted_key_sha = r[56:56+20]
|
||||||
@@ -334,6 +373,12 @@ class EreaderProcessor(object):
|
|||||||
r += fmarker
|
r += fmarker
|
||||||
fnote_ids = fnote_ids[id_len+4:]
|
fnote_ids = fnote_ids[id_len+4:]
|
||||||
|
|
||||||
|
# TODO: Handle dictionary index (?) pages - which are also marked as
|
||||||
|
# sidebar_pages (?). For now dictionary sidebars are ignored
|
||||||
|
# For dictionaries - record 0 is null terminated strings, followed by
|
||||||
|
# blocks of around 62000 bytes and a final block. Not sure of the
|
||||||
|
# encoding
|
||||||
|
|
||||||
# now handle sidebar pages
|
# now handle sidebar pages
|
||||||
if self.num_sidebar_pages > 0:
|
if self.num_sidebar_pages > 0:
|
||||||
r += '\n'
|
r += '\n'
|
||||||
@@ -346,7 +391,7 @@ class EreaderProcessor(object):
|
|||||||
id_len = ord(sbar_ids[2])
|
id_len = ord(sbar_ids[2])
|
||||||
id = sbar_ids[3:3+id_len]
|
id = sbar_ids[3:3+id_len]
|
||||||
smarker = '<sidebar id="%s">\n' % id
|
smarker = '<sidebar id="%s">\n' % id
|
||||||
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
|
smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
|
||||||
smarker += '\n</sidebar>\n'
|
smarker += '\n</sidebar>\n'
|
||||||
r += smarker
|
r += smarker
|
||||||
sbar_ids = sbar_ids[id_len+4:]
|
sbar_ids = sbar_ids[id_len+4:]
|
||||||
@@ -354,20 +399,20 @@ class EreaderProcessor(object):
|
|||||||
return r
|
return r
|
||||||
|
|
||||||
def cleanPML(pml):
|
def cleanPML(pml):
|
||||||
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
|
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
|
||||||
pml2 = pml
|
pml2 = pml
|
||||||
for k in xrange(128,256):
|
for k in xrange(128,256):
|
||||||
badChar = chr(k)
|
badChar = chr(k)
|
||||||
pml2 = pml2.replace(badChar, '\\a%03d' % k)
|
pml2 = pml2.replace(badChar, '\\a%03d' % k)
|
||||||
return pml2
|
return pml2
|
||||||
|
|
||||||
def convertEreaderToPml(infile, name, cc, outdir):
|
def convertEreaderToPml(infile, name, cc, outdir):
|
||||||
if not os.path.exists(outdir):
|
if not os.path.exists(outdir):
|
||||||
os.makedirs(outdir)
|
os.makedirs(outdir)
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
print " Decoding File"
|
print " Decoding File"
|
||||||
sect = Sectionizer(infile, 'PNRdPPrs')
|
sect = Sectionizer(infile, 'PNRdPPrs')
|
||||||
er = EreaderProcessor(sect.loadSection, name, cc)
|
er = EreaderProcessor(sect, name, cc)
|
||||||
|
|
||||||
if er.getNumImages() > 0:
|
if er.getNumImages() > 0:
|
||||||
print " Extracting images"
|
print " Extracting images"
|
||||||
@@ -390,6 +435,47 @@ def convertEreaderToPml(infile, name, cc, outdir):
|
|||||||
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, name, cc, make_pmlz):
|
||||||
|
if make_pmlz :
|
||||||
|
# ignore specified outdir, use tempdir instead
|
||||||
|
outdir = tempfile.mkdtemp()
|
||||||
|
try:
|
||||||
|
print "Processing..."
|
||||||
|
convertEreaderToPml(infile, name, cc, outdir)
|
||||||
|
if make_pmlz :
|
||||||
|
import zipfile
|
||||||
|
import shutil
|
||||||
|
print " Creating PMLZ file"
|
||||||
|
zipname = infile[:-4] + '.pmlz'
|
||||||
|
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
||||||
|
list = os.listdir(outdir)
|
||||||
|
for file in list:
|
||||||
|
localname = file
|
||||||
|
filePath = os.path.join(outdir,file)
|
||||||
|
if os.path.isfile(filePath):
|
||||||
|
myZipFile.write(filePath, localname)
|
||||||
|
elif os.path.isdir(filePath):
|
||||||
|
imageList = os.listdir(filePath)
|
||||||
|
localimgdir = os.path.basename(filePath)
|
||||||
|
for image in imageList:
|
||||||
|
localname = os.path.join(localimgdir,image)
|
||||||
|
imagePath = os.path.join(filePath,image)
|
||||||
|
if os.path.isfile(imagePath):
|
||||||
|
myZipFile.write(imagePath, localname)
|
||||||
|
myZipFile.close()
|
||||||
|
# remove temporary directory
|
||||||
|
shutil.rmtree(outdir, True)
|
||||||
|
print 'output is %s' % zipname
|
||||||
|
else :
|
||||||
|
print 'output in %s' % outdir
|
||||||
|
print "done"
|
||||||
|
except ValueError, e:
|
||||||
|
print "Error: %s" % e
|
||||||
|
return 1
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
print "Converts DRMed eReader books to PML Source"
|
print "Converts DRMed eReader books to PML Source"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
@@ -404,8 +490,8 @@ def usage():
|
|||||||
print " It's enough to enter the last 8 digits of the credit card number"
|
print " It's enough to enter the last 8 digits of the credit card number"
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
global bookname
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
opts, args = getopt.getopt(sys.argv[1:], "h", ["make-pmlz"])
|
||||||
except getopt.GetoptError, err:
|
except getopt.GetoptError, err:
|
||||||
@@ -413,76 +499,28 @@ def main(argv=None):
|
|||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
make_pmlz = False
|
make_pmlz = False
|
||||||
zipname = None
|
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o == "-h":
|
if o == "-h":
|
||||||
usage()
|
usage()
|
||||||
return 0
|
return 0
|
||||||
elif o == "--make-pmlz":
|
elif o == "--make-pmlz":
|
||||||
make_pmlz = True
|
make_pmlz = True
|
||||||
zipname = ''
|
|
||||||
|
|
||||||
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
|
||||||
|
|
||||||
if len(args)!=3 and len(args)!=4:
|
if len(args)!=3 and len(args)!=4:
|
||||||
usage()
|
usage()
|
||||||
return 1
|
return 1
|
||||||
else:
|
|
||||||
if len(args)==3:
|
|
||||||
infile, name, cc = args[0], args[1], args[2]
|
|
||||||
outdir = infile[:-4] + '_Source'
|
|
||||||
elif len(args)==4:
|
|
||||||
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
|
||||||
|
|
||||||
if make_pmlz :
|
if len(args)==3:
|
||||||
# ignore specified outdir, use tempdir instead
|
infile, name, cc = args[0], args[1], args[2]
|
||||||
outdir = tempfile.mkdtemp()
|
outdir = infile[:-4] + '_Source'
|
||||||
|
elif len(args)==4:
|
||||||
|
infile, outdir, name, cc = args[0], args[1], args[2], args[3]
|
||||||
|
|
||||||
bookname = os.path.splitext(os.path.basename(infile))[0]
|
return decryptBook(infile, outdir, name, cc, make_pmlz)
|
||||||
|
|
||||||
try:
|
|
||||||
print "Processing..."
|
|
||||||
import time
|
|
||||||
start_time = time.time()
|
|
||||||
convertEreaderToPml(infile, name, cc, outdir)
|
|
||||||
|
|
||||||
if make_pmlz :
|
|
||||||
import zipfile
|
|
||||||
import shutil
|
|
||||||
print " Creating PMLZ file"
|
|
||||||
zipname = infile[:-4] + '.pmlz'
|
|
||||||
myZipFile = zipfile.ZipFile(zipname,'w',zipfile.ZIP_STORED, False)
|
|
||||||
list = os.listdir(outdir)
|
|
||||||
for file in list:
|
|
||||||
localname = file
|
|
||||||
filePath = os.path.join(outdir,file)
|
|
||||||
if os.path.isfile(filePath):
|
|
||||||
myZipFile.write(filePath, localname)
|
|
||||||
elif os.path.isdir(filePath):
|
|
||||||
imageList = os.listdir(filePath)
|
|
||||||
localimgdir = os.path.basename(filePath)
|
|
||||||
for image in imageList:
|
|
||||||
localname = os.path.join(localimgdir,image)
|
|
||||||
imagePath = os.path.join(filePath,image)
|
|
||||||
if os.path.isfile(imagePath):
|
|
||||||
myZipFile.write(imagePath, localname)
|
|
||||||
myZipFile.close()
|
|
||||||
# remove temporary directory
|
|
||||||
shutil.rmtree(outdir, True)
|
|
||||||
|
|
||||||
end_time = time.time()
|
|
||||||
search_time = end_time - start_time
|
|
||||||
print 'elapsed time: %.2f seconds' % (search_time, )
|
|
||||||
if make_pmlz :
|
|
||||||
print 'output is %s' % zipname
|
|
||||||
else :
|
|
||||||
print 'output in %s' % outdir
|
|
||||||
print "done"
|
|
||||||
except ValueError, e:
|
|
||||||
print "Error: %s" % e
|
|
||||||
return 1
|
|
||||||
return 0
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ class DocParser(object):
|
|||||||
ys = []
|
ys = []
|
||||||
gdefs = []
|
gdefs = []
|
||||||
|
|
||||||
# get path defintions, positions, dimensions for ecah glyph
|
# get path defintions, positions, dimensions for each glyph
|
||||||
# that makes up the image, and find min x and min y to reposition origin
|
# that makes up the image, and find min x and min y to reposition origin
|
||||||
minx = -1
|
minx = -1
|
||||||
miny = -1
|
miny = -1
|
||||||
@@ -271,6 +271,9 @@ class DocParser(object):
|
|||||||
|
|
||||||
pclass = self.getClass(pclass)
|
pclass = self.getClass(pclass)
|
||||||
|
|
||||||
|
# if paragraph uses extratokens (extra glyphs) then make it fixed
|
||||||
|
(pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)
|
||||||
|
|
||||||
# build up a description of the paragraph in result and return it
|
# build up a description of the paragraph in result and return it
|
||||||
# first check for the basic - all words paragraph
|
# first check for the basic - all words paragraph
|
||||||
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
(pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
|
||||||
@@ -280,6 +283,7 @@ class DocParser(object):
|
|||||||
last = int(slast)
|
last = int(slast)
|
||||||
|
|
||||||
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
makeImage = (regtype == 'vertical') or (regtype == 'table')
|
||||||
|
makeImage = makeImage or (extraglyphs != None)
|
||||||
if self.fixedimage:
|
if self.fixedimage:
|
||||||
makeImage = makeImage or (regtype == 'fixed')
|
makeImage = makeImage or (regtype == 'fixed')
|
||||||
|
|
||||||
@@ -288,6 +292,11 @@ class DocParser(object):
|
|||||||
if self.fixedimage :
|
if self.fixedimage :
|
||||||
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
|
||||||
|
|
||||||
|
# before creating an image make sure glyph info exists
|
||||||
|
gidList = self.getData('info.glyph.glyphID',0,-1)
|
||||||
|
|
||||||
|
makeImage = makeImage & (len(gidList) > 0)
|
||||||
|
|
||||||
if not makeImage :
|
if not makeImage :
|
||||||
# standard all word paragraph
|
# standard all word paragraph
|
||||||
for wordnum in xrange(first, last):
|
for wordnum in xrange(first, last):
|
||||||
@@ -305,6 +314,15 @@ class DocParser(object):
|
|||||||
lastGlyph = firstglyphList[last]
|
lastGlyph = firstglyphList[last]
|
||||||
else :
|
else :
|
||||||
lastGlyph = len(gidList)
|
lastGlyph = len(gidList)
|
||||||
|
|
||||||
|
# handle case of white sapce paragraphs with no actual glyphs in them
|
||||||
|
# by reverting to text based paragraph
|
||||||
|
if firstGlyph >= lastGlyph:
|
||||||
|
# revert to standard text based paragraph
|
||||||
|
for wordnum in xrange(first, last):
|
||||||
|
result.append(('ocr', wordnum))
|
||||||
|
return pclass, result
|
||||||
|
|
||||||
for glyphnum in xrange(firstGlyph, lastGlyph):
|
for glyphnum in xrange(firstGlyph, lastGlyph):
|
||||||
glyphList.append(glyphnum)
|
glyphList.append(glyphnum)
|
||||||
# include any extratokens if they exist
|
# include any extratokens if they exist
|
||||||
@@ -344,6 +362,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
word_class = ''
|
word_class = ''
|
||||||
|
|
||||||
|
word_semantic_type = ''
|
||||||
|
|
||||||
while (line < end) :
|
while (line < end) :
|
||||||
|
|
||||||
(name, argres) = self.lineinDoc(line)
|
(name, argres) = self.lineinDoc(line)
|
||||||
@@ -367,10 +387,10 @@ class DocParser(object):
|
|||||||
ws_last = int(argres)
|
ws_last = int(argres)
|
||||||
|
|
||||||
elif name.endswith('word.class'):
|
elif name.endswith('word.class'):
|
||||||
(cname, space) = argres.split('-',1)
|
(cname, space) = argres.split('-',1)
|
||||||
if space == '' : space = '0'
|
if space == '' : space = '0'
|
||||||
if (cname == 'spaceafter') and (int(space) > 0) :
|
if (cname == 'spaceafter') and (int(space) > 0) :
|
||||||
word_class = 'sa'
|
word_class = 'sa'
|
||||||
|
|
||||||
elif name.endswith('word.img.src'):
|
elif name.endswith('word.img.src'):
|
||||||
result.append(('img' + word_class, int(argres)))
|
result.append(('img' + word_class, int(argres)))
|
||||||
@@ -503,13 +523,80 @@ class DocParser(object):
|
|||||||
return parares
|
return parares
|
||||||
|
|
||||||
|
|
||||||
|
def buildTOCEntry(self, pdesc) :
|
||||||
|
parares = ''
|
||||||
|
sep =''
|
||||||
|
tocentry = ''
|
||||||
|
handle_links = len(self.link_id) > 0
|
||||||
|
|
||||||
|
lstart = 0
|
||||||
|
|
||||||
|
cnt = len(pdesc)
|
||||||
|
for j in xrange( 0, cnt) :
|
||||||
|
|
||||||
|
(wtype, num) = pdesc[j]
|
||||||
|
|
||||||
|
if wtype == 'ocr' :
|
||||||
|
word = self.ocrtext[num]
|
||||||
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
title = title.rstrip('. ')
|
||||||
|
alt_title = parares[lstart:]
|
||||||
|
alt_title = alt_title.strip()
|
||||||
|
# now strip off the actual printed page number
|
||||||
|
alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
|
||||||
|
alt_title = alt_title.rstrip('. ')
|
||||||
|
# skip over any external links - can't have them in a books toc
|
||||||
|
if linktype == 'external' :
|
||||||
|
title = ''
|
||||||
|
alt_title = ''
|
||||||
|
linkpage = ''
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkpage = '%04d' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkpage = self.id[4:]
|
||||||
|
if len(alt_title) >= len(title):
|
||||||
|
title = alt_title
|
||||||
|
if title != '' and linkpage != '':
|
||||||
|
tocentry += title + '|' + linkpage + '\n'
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
else :
|
||||||
|
continue
|
||||||
|
|
||||||
|
return tocentry
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# walk the document tree collecting the information needed
|
# walk the document tree collecting the information needed
|
||||||
# to build an html page using the ocrText
|
# to build an html page using the ocrText
|
||||||
|
|
||||||
def process(self):
|
def process(self):
|
||||||
|
|
||||||
htmlpage = ''
|
tocinfo = ''
|
||||||
|
hlst = []
|
||||||
|
|
||||||
# get the ocr text
|
# get the ocr text
|
||||||
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
|
||||||
@@ -566,8 +653,8 @@ class DocParser(object):
|
|||||||
|
|
||||||
# set anchor for link target on this page
|
# set anchor for link target on this page
|
||||||
if not anchorSet and not first_para_continued:
|
if not anchorSet and not first_para_continued:
|
||||||
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="'
|
hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
|
||||||
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n'
|
hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
|
||||||
anchorSet = True
|
anchorSet = True
|
||||||
|
|
||||||
# handle groups of graphics with text captions
|
# handle groups of graphics with text captions
|
||||||
@@ -576,12 +663,12 @@ class DocParser(object):
|
|||||||
if grptype != None:
|
if grptype != None:
|
||||||
if grptype == 'graphic':
|
if grptype == 'graphic':
|
||||||
gcstr = ' class="' + grptype + '"'
|
gcstr = ' class="' + grptype + '"'
|
||||||
htmlpage += '<div' + gcstr + '>'
|
hlst.append('<div' + gcstr + '>')
|
||||||
inGroup = True
|
inGroup = True
|
||||||
|
|
||||||
elif (etype == 'grpend'):
|
elif (etype == 'grpend'):
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '</div>\n'
|
hlst.append('</div>\n')
|
||||||
inGroup = False
|
inGroup = False
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -591,25 +678,25 @@ class DocParser(object):
|
|||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
if inGroup:
|
if inGroup:
|
||||||
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc)
|
hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
|
||||||
else:
|
else:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
elif regtype == 'chapterheading' :
|
elif regtype == 'chapterheading' :
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
if not breakSet:
|
if not breakSet:
|
||||||
htmlpage += '<div style="page-break-after: always;"> </div>\n'
|
hlst.append('<div style="page-break-after: always;"> </div>\n')
|
||||||
breakSet = True
|
breakSet = True
|
||||||
tag = 'h1'
|
tag = 'h1'
|
||||||
if pclass and (len(pclass) >= 7):
|
if pclass and (len(pclass) >= 7):
|
||||||
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
if pclass[3:7] == 'ch1-' : tag = 'h1'
|
||||||
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
if pclass[3:7] == 'ch2-' : tag = 'h2'
|
||||||
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
if pclass[3:7] == 'ch3-' : tag = 'h3'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
else:
|
else:
|
||||||
htmlpage += '<' + tag + '>'
|
hlst.append('<' + tag + '>')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
|
|
||||||
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -623,11 +710,11 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'tocentry') :
|
elif (regtype == 'tocentry') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -635,8 +722,8 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
tocinfo += self.buildTOCEntry(pdesc)
|
||||||
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
elif (regtype == 'vertical') or (regtype == 'table') :
|
elif (regtype == 'vertical') or (regtype == 'table') :
|
||||||
ptype = 'full'
|
ptype = 'full'
|
||||||
@@ -646,13 +733,13 @@ class DocParser(object):
|
|||||||
ptype = 'end'
|
ptype = 'end'
|
||||||
first_para_continued = False
|
first_para_continued = False
|
||||||
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
(pclass, pdesc) = self.getParaDescription(start, end, regtype)
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
|
|
||||||
|
|
||||||
elif (regtype == 'synth_fcvr.center'):
|
elif (regtype == 'synth_fcvr.center'):
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
else :
|
else :
|
||||||
print ' Making region type', regtype,
|
print ' Making region type', regtype,
|
||||||
@@ -678,29 +765,29 @@ class DocParser(object):
|
|||||||
if pclass[3:6] == 'h1-' : tag = 'h4'
|
if pclass[3:6] == 'h1-' : tag = 'h4'
|
||||||
if pclass[3:6] == 'h2-' : tag = 'h5'
|
if pclass[3:6] == 'h2-' : tag = 'h5'
|
||||||
if pclass[3:6] == 'h3-' : tag = 'h6'
|
if pclass[3:6] == 'h3-' : tag = 'h6'
|
||||||
htmlpage += '<' + tag + ' class="' + pclass + '">'
|
hlst.append('<' + tag + ' class="' + pclass + '">')
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
|
||||||
htmlpage += '</' + tag + '>'
|
hlst.append('</' + tag + '>')
|
||||||
else :
|
else :
|
||||||
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype)
|
hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
|
||||||
else :
|
else :
|
||||||
print ' a "graphic" region'
|
print ' a "graphic" region'
|
||||||
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
(pos, simgsrc) = self.findinDoc('img.src',start,end)
|
||||||
if simgsrc:
|
if simgsrc:
|
||||||
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
|
hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
|
||||||
|
|
||||||
|
|
||||||
|
htmlpage = "".join(hlst)
|
||||||
if last_para_continued :
|
if last_para_continued :
|
||||||
if htmlpage[-4:] == '</p>':
|
if htmlpage[-4:] == '</p>':
|
||||||
htmlpage = htmlpage[0:-4]
|
htmlpage = htmlpage[0:-4]
|
||||||
last_para_continued = False
|
last_para_continued = False
|
||||||
|
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
|
||||||
# create a document parser
|
# create a document parser
|
||||||
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
|
||||||
htmlpage = dp.process()
|
htmlpage, tocinfo = dp.process()
|
||||||
return htmlpage
|
return htmlpage, tocinfo
|
||||||
@@ -0,0 +1,249 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import csv
|
||||||
|
import os
|
||||||
|
import getopt
|
||||||
|
from struct import pack
|
||||||
|
from struct import unpack
|
||||||
|
|
||||||
|
|
||||||
|
class PParser(object):
|
||||||
|
def __init__(self, gd, flatxml, meta_array):
|
||||||
|
self.gd = gd
|
||||||
|
self.flatdoc = flatxml.split('\n')
|
||||||
|
self.docSize = len(self.flatdoc)
|
||||||
|
self.temp = []
|
||||||
|
|
||||||
|
self.ph = -1
|
||||||
|
self.pw = -1
|
||||||
|
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.ph = max(self.ph, int(argres))
|
||||||
|
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
|
||||||
|
for p in startpos:
|
||||||
|
(name, argres) = self.lineinDoc(p)
|
||||||
|
self.pw = max(self.pw, int(argres))
|
||||||
|
|
||||||
|
if self.ph <= 0:
|
||||||
|
self.ph = int(meta_array.get('pageHeight', '11000'))
|
||||||
|
if self.pw <= 0:
|
||||||
|
self.pw = int(meta_array.get('pageWidth', '8500'))
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.x')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.x', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gx = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.y')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.y', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gy = res
|
||||||
|
|
||||||
|
res = []
|
||||||
|
startpos = self.posinDoc('info.glyph.glyphID')
|
||||||
|
for p in startpos:
|
||||||
|
argres = self.getDataatPos('info.glyph.glyphID', p)
|
||||||
|
res.extend(argres)
|
||||||
|
self.gid = res
|
||||||
|
|
||||||
|
|
||||||
|
# return tag at line pos in document
|
||||||
|
def lineinDoc(self, pos) :
|
||||||
|
if (pos >= 0) and (pos < self.docSize) :
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
return name, argres
|
||||||
|
|
||||||
|
# find tag in doc if within pos to end inclusive
|
||||||
|
def findinDoc(self, tagpath, pos, end) :
|
||||||
|
result = None
|
||||||
|
if end == -1 :
|
||||||
|
end = self.docSize
|
||||||
|
else:
|
||||||
|
end = min(self.docSize, end)
|
||||||
|
foundat = -1
|
||||||
|
for j in xrange(pos, end):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argres) = item.split('=',1)
|
||||||
|
else :
|
||||||
|
name = item
|
||||||
|
argres = ''
|
||||||
|
if name.endswith(tagpath) :
|
||||||
|
result = argres
|
||||||
|
foundat = j
|
||||||
|
break
|
||||||
|
return foundat, result
|
||||||
|
|
||||||
|
# return list of start positions for the tagpath
|
||||||
|
def posinDoc(self, tagpath):
|
||||||
|
startpos = []
|
||||||
|
pos = 0
|
||||||
|
res = ""
|
||||||
|
while res != None :
|
||||||
|
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
|
||||||
|
if res != None :
|
||||||
|
startpos.append(foundpos)
|
||||||
|
pos = foundpos + 1
|
||||||
|
return startpos
|
||||||
|
|
||||||
|
def getData(self, path):
|
||||||
|
result = None
|
||||||
|
cnt = len(self.flatdoc)
|
||||||
|
for j in xrange(cnt):
|
||||||
|
item = self.flatdoc[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
break
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
return result
|
||||||
|
|
||||||
|
def getDataatPos(self, path, pos):
|
||||||
|
result = None
|
||||||
|
item = self.flatdoc[pos]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
return result
|
||||||
|
|
||||||
|
def getDataTemp(self, path):
|
||||||
|
result = None
|
||||||
|
cnt = len(self.temp)
|
||||||
|
for j in xrange(cnt):
|
||||||
|
item = self.temp[j]
|
||||||
|
if item.find('=') >= 0:
|
||||||
|
(name, argt) = item.split('=')
|
||||||
|
argres = argt.split('|')
|
||||||
|
else:
|
||||||
|
name = item
|
||||||
|
argres = []
|
||||||
|
if (name.endswith(path)):
|
||||||
|
result = argres
|
||||||
|
self.temp.pop(j)
|
||||||
|
break
|
||||||
|
if (len(argres) > 0) :
|
||||||
|
for j in xrange(0,len(argres)):
|
||||||
|
argres[j] = int(argres[j])
|
||||||
|
return result
|
||||||
|
|
||||||
|
def getImages(self):
|
||||||
|
result = []
|
||||||
|
self.temp = self.flatdoc
|
||||||
|
while (self.getDataTemp('img') != None):
|
||||||
|
h = self.getDataTemp('img.h')[0]
|
||||||
|
w = self.getDataTemp('img.w')[0]
|
||||||
|
x = self.getDataTemp('img.x')[0]
|
||||||
|
y = self.getDataTemp('img.y')[0]
|
||||||
|
src = self.getDataTemp('img.src')[0]
|
||||||
|
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
|
||||||
|
return result
|
||||||
|
|
||||||
|
def getGlyphs(self):
|
||||||
|
result = []
|
||||||
|
if (self.gid != None) and (len(self.gid) > 0):
|
||||||
|
glyphs = []
|
||||||
|
for j in set(self.gid):
|
||||||
|
glyphs.append(j)
|
||||||
|
glyphs.sort()
|
||||||
|
for gid in glyphs:
|
||||||
|
id='id="gl%d"' % gid
|
||||||
|
path = self.gd.lookup(id)
|
||||||
|
if path:
|
||||||
|
result.append(id + ' ' + path)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
||||||
|
mlst = []
|
||||||
|
pp = PParser(gdict, flat_xml, meta_array)
|
||||||
|
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
||||||
|
if (raw):
|
||||||
|
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
||||||
|
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
||||||
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
|
else:
|
||||||
|
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
||||||
|
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
||||||
|
mlst.append('<script><![CDATA[\n')
|
||||||
|
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
||||||
|
mlst.append('var dpi=%d;\n' % scaledpi)
|
||||||
|
if (previd) :
|
||||||
|
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
||||||
|
if (nextid) :
|
||||||
|
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
||||||
|
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
||||||
|
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
||||||
|
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
||||||
|
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
||||||
|
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
|
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
||||||
|
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
||||||
|
mlst.append('window.onload=setsize;\n')
|
||||||
|
mlst.append(']]></script>\n')
|
||||||
|
mlst.append('</head>\n')
|
||||||
|
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
||||||
|
mlst.append('<div style="white-space:nowrap;">\n')
|
||||||
|
if previd == None:
|
||||||
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
|
else:
|
||||||
|
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
|
||||||
|
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
|
||||||
|
if (pp.gid != None):
|
||||||
|
mlst.append('<defs>\n')
|
||||||
|
gdefs = pp.getGlyphs()
|
||||||
|
for j in xrange(0,len(gdefs)):
|
||||||
|
mlst.append(gdefs[j])
|
||||||
|
mlst.append('</defs>\n')
|
||||||
|
img = pp.getImages()
|
||||||
|
if (img != None):
|
||||||
|
for j in xrange(0,len(img)):
|
||||||
|
mlst.append(img[j])
|
||||||
|
if (pp.gid != None):
|
||||||
|
for j in xrange(0,len(pp.gid)):
|
||||||
|
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
|
||||||
|
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
|
||||||
|
xpos = "%d" % (pp.pw // 3)
|
||||||
|
ypos = "%d" % (pp.ph // 3)
|
||||||
|
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
|
||||||
|
if (raw) :
|
||||||
|
mlst.append('</svg>')
|
||||||
|
else :
|
||||||
|
mlst.append('</svg></a>\n')
|
||||||
|
if nextid == None:
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
||||||
|
else :
|
||||||
|
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
|
||||||
|
mlst.append('</div>\n')
|
||||||
|
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
|
||||||
|
mlst.append('</body>\n')
|
||||||
|
mlst.append('</html>\n')
|
||||||
|
return "".join(mlst)
|
||||||
@@ -19,13 +19,28 @@ import getopt
|
|||||||
from struct import pack
|
from struct import pack
|
||||||
from struct import unpack
|
from struct import unpack
|
||||||
|
|
||||||
|
class TpzDRMError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
# local support routines
|
# local support routines
|
||||||
import convert2xml
|
if 'calibre' in sys.modules:
|
||||||
import flatxml2html
|
inCalibre = True
|
||||||
import flatxml2svg
|
else:
|
||||||
import stylexml2css
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre :
|
||||||
|
from calibre_plugins.k4mobidedrm import convert2xml
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2html
|
||||||
|
from calibre_plugins.k4mobidedrm import flatxml2svg
|
||||||
|
from calibre_plugins.k4mobidedrm import stylexml2css
|
||||||
|
else :
|
||||||
|
import convert2xml
|
||||||
|
import flatxml2html
|
||||||
|
import flatxml2svg
|
||||||
|
import stylexml2css
|
||||||
|
|
||||||
|
# global switch
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
# Get a 7 bit encoded number from a file
|
# Get a 7 bit encoded number from a file
|
||||||
def readEncodedNumber(file):
|
def readEncodedNumber(file):
|
||||||
@@ -35,11 +50,11 @@ def readEncodedNumber(file):
|
|||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
if data == 0xFF:
|
if data == 0xFF:
|
||||||
flag = True
|
flag = True
|
||||||
c = file.read(1)
|
c = file.read(1)
|
||||||
if (len(c) == 0):
|
if (len(c) == 0):
|
||||||
return None
|
return None
|
||||||
data = ord(c)
|
data = ord(c)
|
||||||
if data >= 0x80:
|
if data >= 0x80:
|
||||||
datax = (data & 0x7F)
|
datax = (data & 0x7F)
|
||||||
while data >= 0x80 :
|
while data >= 0x80 :
|
||||||
@@ -50,7 +65,7 @@ def readEncodedNumber(file):
|
|||||||
datax = (datax <<7) + (data & 0x7F)
|
datax = (datax <<7) + (data & 0x7F)
|
||||||
data = datax
|
data = datax
|
||||||
if flag:
|
if flag:
|
||||||
data = -data
|
data = -data
|
||||||
return data
|
return data
|
||||||
|
|
||||||
# Get a length prefixed string from the file
|
# Get a length prefixed string from the file
|
||||||
@@ -103,7 +118,8 @@ class Dictionary(object):
|
|||||||
return self.stable[self.pos]
|
return self.stable[self.pos]
|
||||||
else:
|
else:
|
||||||
print "Error - %d outside of string table limits" % val
|
print "Error - %d outside of string table limits" % val
|
||||||
sys.exit(-1)
|
raise TpzDRMError('outside or string table limits')
|
||||||
|
# sys.exit(-1)
|
||||||
def getSize(self):
|
def getSize(self):
|
||||||
return self.size
|
return self.size
|
||||||
def getPos(self):
|
def getPos(self):
|
||||||
@@ -192,6 +208,8 @@ class GParser(object):
|
|||||||
argres[j] = int(argres[j])
|
argres[j] = int(argres[j])
|
||||||
return result
|
return result
|
||||||
def getGlyphDim(self, gly):
|
def getGlyphDim(self, gly):
|
||||||
|
if self.gdpi[gly] == 0:
|
||||||
|
return 0, 0
|
||||||
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
|
||||||
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
|
||||||
return maxh, maxw
|
return maxh, maxw
|
||||||
@@ -282,9 +300,10 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
if not os.path.exists(svgDir) :
|
if not os.path.exists(svgDir) :
|
||||||
os.makedirs(svgDir)
|
os.makedirs(svgDir)
|
||||||
|
|
||||||
xmlDir = os.path.join(bookDir,'xml')
|
if buildXML:
|
||||||
if not os.path.exists(xmlDir) :
|
xmlDir = os.path.join(bookDir,'xml')
|
||||||
os.makedirs(xmlDir)
|
if not os.path.exists(xmlDir) :
|
||||||
|
os.makedirs(xmlDir)
|
||||||
|
|
||||||
otherFile = os.path.join(bookDir,'other0000.dat')
|
otherFile = os.path.join(bookDir,'other0000.dat')
|
||||||
if not os.path.exists(otherFile) :
|
if not os.path.exists(otherFile) :
|
||||||
@@ -320,11 +339,26 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
print 'Processing Meta Data and creating OPF'
|
print 'Processing Meta Data and creating OPF'
|
||||||
meta_array = getMetaArray(metaFile)
|
meta_array = getMetaArray(metaFile)
|
||||||
|
|
||||||
xname = os.path.join(xmlDir, 'metadata.xml')
|
# replace special chars in title and authors like & < >
|
||||||
metastr = ''
|
title = meta_array.get('Title','No Title Provided')
|
||||||
for key in meta_array:
|
title = title.replace('&','&')
|
||||||
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n'
|
title = title.replace('<','<')
|
||||||
file(xname, 'wb').write(metastr)
|
title = title.replace('>','>')
|
||||||
|
meta_array['Title'] = title
|
||||||
|
authors = meta_array.get('Authors','No Authors Provided')
|
||||||
|
authors = authors.replace('&','&')
|
||||||
|
authors = authors.replace('<','<')
|
||||||
|
authors = authors.replace('>','>')
|
||||||
|
meta_array['Authors'] = authors
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, 'metadata.xml')
|
||||||
|
mlst = []
|
||||||
|
for key in meta_array:
|
||||||
|
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
|
||||||
|
metastr = "".join(mlst)
|
||||||
|
mlst = None
|
||||||
|
file(xname, 'wb').write(metastr)
|
||||||
|
|
||||||
print 'Processing StyleSheet'
|
print 'Processing StyleSheet'
|
||||||
# get some scaling info from metadata to use while processing styles
|
# get some scaling info from metadata to use while processing styles
|
||||||
@@ -346,14 +380,39 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
(ph, pw) = getPageDim(flat_xml)
|
(ph, pw) = getPageDim(flat_xml)
|
||||||
if (ph == '-1') or (ph == '0') : ph = '11000'
|
if (ph == '-1') or (ph == '0') : ph = '11000'
|
||||||
if (pw == '-1') or (pw == '0') : pw = '8500'
|
if (pw == '-1') or (pw == '0') : pw = '8500'
|
||||||
|
meta_array['pageHeight'] = ph
|
||||||
|
meta_array['pageWidth'] = pw
|
||||||
|
if 'fontSize' not in meta_array.keys():
|
||||||
|
meta_array['fontSize'] = fontsize
|
||||||
|
|
||||||
# print ' ', 'other0000.dat'
|
# process other.dat for css info and for map of page files to svg images
|
||||||
|
# this map is needed because some pages actually are made up of multiple
|
||||||
|
# pageXXXX.xml files
|
||||||
xname = os.path.join(bookDir, 'style.css')
|
xname = os.path.join(bookDir, 'style.css')
|
||||||
flat_xml = convert2xml.fromData(dict, otherFile)
|
flat_xml = convert2xml.fromData(dict, otherFile)
|
||||||
|
|
||||||
|
# extract info.original.pid to get original page information
|
||||||
|
pageIDMap = {}
|
||||||
|
pageidnums = stylexml2css.getpageIDMap(flat_xml)
|
||||||
|
if len(pageidnums) == 0:
|
||||||
|
filenames = os.listdir(pageDir)
|
||||||
|
numfiles = len(filenames)
|
||||||
|
for k in range(numfiles):
|
||||||
|
pageidnums.append(k)
|
||||||
|
# create a map from page ids to list of page file nums to process for that page
|
||||||
|
for i in range(len(pageidnums)):
|
||||||
|
id = pageidnums[i]
|
||||||
|
if id in pageIDMap.keys():
|
||||||
|
pageIDMap[id].append(i)
|
||||||
|
else:
|
||||||
|
pageIDMap[id] = [i]
|
||||||
|
|
||||||
|
# now get the css info
|
||||||
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
|
||||||
file(xname, 'wb').write(cssstr)
|
file(xname, 'wb').write(cssstr)
|
||||||
xname = os.path.join(xmlDir, 'other0000.xml')
|
if buildXML:
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
xname = os.path.join(xmlDir, 'other0000.xml')
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
|
||||||
|
|
||||||
print 'Processing Glyphs'
|
print 'Processing Glyphs'
|
||||||
gd = GlyphDict()
|
gd = GlyphDict()
|
||||||
@@ -373,8 +432,9 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
fname = os.path.join(glyphsDir,filename)
|
fname = os.path.join(glyphsDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
if buildXML:
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
gp = GParser(flat_xml)
|
gp = GParser(flat_xml)
|
||||||
for i in xrange(0, gp.count):
|
for i in xrange(0, gp.count):
|
||||||
@@ -389,101 +449,188 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
glyfile.close()
|
glyfile.close()
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
|
|
||||||
# start up the html
|
# start up the html
|
||||||
|
# also build up tocentries while processing html
|
||||||
htmlFileName = "book.html"
|
htmlFileName = "book.html"
|
||||||
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
hlst = []
|
||||||
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n'
|
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
|
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
|
||||||
htmlstr += '<head>\n'
|
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
|
||||||
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
|
hlst.append('<head>\n')
|
||||||
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
|
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
|
||||||
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
|
||||||
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
if 'ASIN' in meta_array:
|
||||||
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n'
|
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
htmlstr += '</head>\n<body>\n'
|
if 'GUID' in meta_array:
|
||||||
|
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
|
||||||
|
hlst.append('</head>\n<body>\n')
|
||||||
|
|
||||||
print 'Processing Pages'
|
print 'Processing Pages'
|
||||||
# Books are at 1440 DPI. This is rendering at twice that size for
|
# Books are at 1440 DPI. This is rendering at twice that size for
|
||||||
# readability when rendering to the screen.
|
# readability when rendering to the screen.
|
||||||
scaledpi = 1440.0
|
scaledpi = 1440.0
|
||||||
|
|
||||||
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n'
|
|
||||||
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
|
|
||||||
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >'
|
|
||||||
svgindex += '<head>\n'
|
|
||||||
svgindex += '<title>' + meta_array['Title'] + '</title>\n'
|
|
||||||
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
|
|
||||||
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
|
|
||||||
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n'
|
|
||||||
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n'
|
|
||||||
svgindex += '</head>\n'
|
|
||||||
svgindex += '<body>\n'
|
|
||||||
|
|
||||||
filenames = os.listdir(pageDir)
|
filenames = os.listdir(pageDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
numfiles = len(filenames)
|
numfiles = len(filenames)
|
||||||
counter = 0
|
|
||||||
|
xmllst = []
|
||||||
|
elst = []
|
||||||
|
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
# print ' ', filename
|
# print ' ', filename
|
||||||
print ".",
|
print ".",
|
||||||
|
|
||||||
fname = os.path.join(pageDir,filename)
|
fname = os.path.join(pageDir,filename)
|
||||||
flat_xml = convert2xml.fromData(dict, fname)
|
flat_xml = convert2xml.fromData(dict, fname)
|
||||||
|
|
||||||
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
# keep flat_xml for later svg processing
|
||||||
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
xmllst.append(flat_xml)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
|
||||||
|
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
|
||||||
|
|
||||||
# first get the html
|
# first get the html
|
||||||
htmlstr += flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
|
||||||
|
elst.append(tocinfo)
|
||||||
|
hlst.append(pagehtml)
|
||||||
|
|
||||||
# now get the svg image of the page
|
# finish up the html string and output it
|
||||||
svgxml = flatxml2svg.convert2SVG(gd, flat_xml, counter, numfiles, svgDir, raw, meta_array, scaledpi)
|
hlst.append('</body>\n</html>\n')
|
||||||
|
htmlstr = "".join(hlst)
|
||||||
|
hlst = None
|
||||||
|
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
||||||
|
|
||||||
|
print " "
|
||||||
|
print 'Extracting Table of Contents from Amazon OCR'
|
||||||
|
|
||||||
|
# first create a table of contents file for the svg images
|
||||||
|
tlst = []
|
||||||
|
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
tlst.append('<head>\n')
|
||||||
|
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
tlst.append('</head>\n')
|
||||||
|
tlst.append('<body>\n')
|
||||||
|
|
||||||
|
tlst.append('<h2>Table of Contents</h2>\n')
|
||||||
|
start = pageidnums[0]
|
||||||
|
if (raw):
|
||||||
|
startname = 'page%04d.svg' % start
|
||||||
|
else:
|
||||||
|
startname = 'page%04d.xhtml' % start
|
||||||
|
|
||||||
|
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
|
||||||
|
# build up a table of contents for the svg xhtml output
|
||||||
|
tocentries = "".join(elst)
|
||||||
|
elst = None
|
||||||
|
toclst = tocentries.split('\n')
|
||||||
|
toclst.pop()
|
||||||
|
for entry in toclst:
|
||||||
|
print entry
|
||||||
|
title, pagenum = entry.split('|')
|
||||||
|
id = pageidnums[int(pagenum)]
|
||||||
|
if (raw):
|
||||||
|
fname = 'page%04d.svg' % id
|
||||||
|
else:
|
||||||
|
fname = 'page%04d.xhtml' % id
|
||||||
|
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
|
||||||
|
tlst.append('</body>\n')
|
||||||
|
tlst.append('</html>\n')
|
||||||
|
tochtml = "".join(tlst)
|
||||||
|
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
|
||||||
|
|
||||||
|
|
||||||
|
# now create index_svg.xhtml that points to all required files
|
||||||
|
slst = []
|
||||||
|
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
||||||
|
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
|
||||||
|
slst.append('<head>\n')
|
||||||
|
slst.append('<title>' + meta_array['Title'] + '</title>\n')
|
||||||
|
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
|
||||||
|
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
|
||||||
|
if 'ASIN' in meta_array:
|
||||||
|
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
|
||||||
|
if 'GUID' in meta_array:
|
||||||
|
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
|
||||||
|
slst.append('</head>\n')
|
||||||
|
slst.append('<body>\n')
|
||||||
|
|
||||||
|
print "Building svg images of each book page"
|
||||||
|
slst.append('<h2>List of Pages</h2>\n')
|
||||||
|
slst.append('<div>\n')
|
||||||
|
idlst = sorted(pageIDMap.keys())
|
||||||
|
numids = len(idlst)
|
||||||
|
cnt = len(idlst)
|
||||||
|
previd = None
|
||||||
|
for j in range(cnt):
|
||||||
|
pageid = idlst[j]
|
||||||
|
if j < cnt - 1:
|
||||||
|
nextid = idlst[j+1]
|
||||||
|
else:
|
||||||
|
nextid = None
|
||||||
|
print '.',
|
||||||
|
pagelst = pageIDMap[pageid]
|
||||||
|
flst = []
|
||||||
|
for page in pagelst:
|
||||||
|
flst.append(xmllst[page])
|
||||||
|
flat_svg = "".join(flst)
|
||||||
|
flst=None
|
||||||
|
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
|
||||||
if (raw) :
|
if (raw) :
|
||||||
pfile = open(os.path.join(svgDir,filename.replace('.dat','.svg')), 'w')
|
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
|
||||||
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (counter, counter)
|
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
|
||||||
else :
|
else :
|
||||||
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % counter), 'w')
|
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
|
||||||
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (counter, counter)
|
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
|
||||||
|
previd = pageid
|
||||||
|
|
||||||
pfile.write(svgxml)
|
pfile.write(svgxml)
|
||||||
pfile.close()
|
pfile.close()
|
||||||
|
|
||||||
counter += 1
|
counter += 1
|
||||||
|
slst.append('</div>\n')
|
||||||
|
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
|
||||||
|
slst.append('</body>\n</html>\n')
|
||||||
|
svgindex = "".join(slst)
|
||||||
|
slst = None
|
||||||
|
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
||||||
|
|
||||||
print " "
|
print " "
|
||||||
|
|
||||||
# finish up the html string and output it
|
|
||||||
htmlstr += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
|
|
||||||
|
|
||||||
# finish up the svg index string and output it
|
|
||||||
svgindex += '</body>\n</html>\n'
|
|
||||||
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
|
|
||||||
|
|
||||||
# build the opf file
|
# build the opf file
|
||||||
opfname = os.path.join(bookDir, 'book.opf')
|
opfname = os.path.join(bookDir, 'book.opf')
|
||||||
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n'
|
olst = []
|
||||||
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n'
|
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
|
||||||
|
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
|
||||||
# adding metadata
|
# adding metadata
|
||||||
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
|
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
|
||||||
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n'
|
if 'GUID' in meta_array:
|
||||||
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n'
|
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n'
|
if 'ASIN' in meta_array:
|
||||||
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n'
|
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n'
|
if 'oASIN' in meta_array:
|
||||||
opfstr += ' <dc:language>en</dc:language>\n'
|
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
|
||||||
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n'
|
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
|
||||||
|
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
|
||||||
|
olst.append(' <dc:language>en</dc:language>\n')
|
||||||
|
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <meta name="cover" content="bookcover"/>\n'
|
olst.append(' <meta name="cover" content="bookcover"/>\n')
|
||||||
opfstr += ' </metadata>\n'
|
olst.append(' </metadata>\n')
|
||||||
opfstr += '<manifest>\n'
|
olst.append('<manifest>\n')
|
||||||
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n'
|
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
|
||||||
opfstr += ' <item id="stylesheet" href="style.css" media-type="text.css"/>\n'
|
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
|
||||||
# adding image files to manifest
|
# adding image files to manifest
|
||||||
filenames = os.listdir(imgDir)
|
filenames = os.listdir(imgDir)
|
||||||
filenames = sorted(filenames)
|
filenames = sorted(filenames)
|
||||||
@@ -493,17 +640,19 @@ def generateBook(bookDir, raw, fixedimage):
|
|||||||
imgext = 'jpeg'
|
imgext = 'jpeg'
|
||||||
if imgext == '.svg':
|
if imgext == '.svg':
|
||||||
imgext = 'svg+xml'
|
imgext = 'svg+xml'
|
||||||
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n'
|
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n'
|
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
|
||||||
opfstr += '</manifest>\n'
|
olst.append('</manifest>\n')
|
||||||
# adding spine
|
# adding spine
|
||||||
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n'
|
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
|
||||||
if isCover:
|
if isCover:
|
||||||
opfstr += ' <guide>\n'
|
olst.append(' <guide>\n')
|
||||||
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n'
|
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
|
||||||
opfstr += ' </guide>\n'
|
olst.append(' </guide>\n')
|
||||||
opfstr += '</package>\n'
|
olst.append('</package>\n')
|
||||||
|
opfstr = "".join(olst)
|
||||||
|
olst = None
|
||||||
file(opfname, 'wb').write(opfstr)
|
file(opfname, 'wb').write(opfstr)
|
||||||
|
|
||||||
print 'Processing Complete'
|
print 'Processing Complete'
|
||||||
@@ -524,7 +673,6 @@ def usage():
|
|||||||
|
|
||||||
def main(argv):
|
def main(argv):
|
||||||
bookDir = ''
|
bookDir = ''
|
||||||
|
|
||||||
if len(argv) == 0:
|
if len(argv) == 0:
|
||||||
argv = sys.argv
|
argv = sys.argv
|
||||||
|
|
||||||
@@ -541,7 +689,7 @@ def main(argv):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
raw = 0
|
raw = 0
|
||||||
fixedimage = False
|
fixedimage = True
|
||||||
for o, a in opts:
|
for o, a in opts:
|
||||||
if o =="-h":
|
if o =="-h":
|
||||||
usage()
|
usage()
|
||||||
@@ -0,0 +1,336 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ignobleepub.pyw, version 3.4
|
||||||
|
|
||||||
|
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
||||||
|
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
||||||
|
# (make sure to install the version for Python 2.6). Save this script file as
|
||||||
|
# ignobleepub.pyw and double-click on it to run it.
|
||||||
|
|
||||||
|
# Revision history:
|
||||||
|
# 1 - Initial release
|
||||||
|
# 2 - Added OS X support by using OpenSSL when available
|
||||||
|
# 3 - screen out improper key lengths to prevent segfaults on Linux
|
||||||
|
# 3.1 - Allow Windows versions of libcrypto to be found
|
||||||
|
# 3.2 - add support for encoding to 'utf-8' when building up list of files to cecrypt from encryption.xml
|
||||||
|
# 3.3 - On Windows try PyCrypto first and OpenSSL next
|
||||||
|
# 3.4 - Modify interace to allow use with import
|
||||||
|
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import zlib
|
||||||
|
import zipfile
|
||||||
|
from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED
|
||||||
|
from contextlib import closing
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkFileDialog
|
||||||
|
import tkMessageBox
|
||||||
|
|
||||||
|
class IGNOBLEError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _load_crypto_libcrypto():
|
||||||
|
from ctypes import CDLL, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, cast
|
||||||
|
from ctypes.util import find_library
|
||||||
|
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
libcrypto = find_library('libeay32')
|
||||||
|
else:
|
||||||
|
libcrypto = find_library('crypto')
|
||||||
|
if libcrypto is None:
|
||||||
|
raise IGNOBLEError('libcrypto not found')
|
||||||
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
|
||||||
|
('rounds', c_int)]
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
|
c_int])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
|
||||||
|
[c_char_p, c_int, AES_KEY_p])
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
|
c_int])
|
||||||
|
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, userkey):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise IGNOBLEError('AES improper key used')
|
||||||
|
return
|
||||||
|
key = self._key = AES_KEY()
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
|
||||||
|
if rv < 0:
|
||||||
|
raise IGNOBLEError('Failed to initialize AES key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
iv = ("\x00" * self._blocksize)
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise IGNOBLEError('AES decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
return AES
|
||||||
|
|
||||||
|
def _load_crypto_pycrypto():
|
||||||
|
from Crypto.Cipher import AES as _AES
|
||||||
|
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, key):
|
||||||
|
self._aes = _AES.new(key, _AES.MODE_CBC)
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
return self._aes.decrypt(data)
|
||||||
|
|
||||||
|
return AES
|
||||||
|
|
||||||
|
def _load_crypto():
|
||||||
|
AES = None
|
||||||
|
cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
|
||||||
|
for loader in cryptolist:
|
||||||
|
try:
|
||||||
|
AES = loader()
|
||||||
|
break
|
||||||
|
except (ImportError, IGNOBLEError):
|
||||||
|
pass
|
||||||
|
return AES
|
||||||
|
|
||||||
|
AES = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
Decrypt Barnes & Noble ADEPT encrypted EPUB books.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
META_NAMES = ('mimetype', 'META-INF/rights.xml', 'META-INF/encryption.xml')
|
||||||
|
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
||||||
|
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
||||||
|
|
||||||
|
class ZipInfo(zipfile.ZipInfo):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
if 'compress_type' in kwargs:
|
||||||
|
compress_type = kwargs.pop('compress_type')
|
||||||
|
super(ZipInfo, self).__init__(*args, **kwargs)
|
||||||
|
self.compress_type = compress_type
|
||||||
|
|
||||||
|
class Decryptor(object):
|
||||||
|
def __init__(self, bookkey, encryption):
|
||||||
|
enc = lambda tag: '{%s}%s' % (NSMAP['enc'], tag)
|
||||||
|
# self._aes = AES.new(bookkey, AES.MODE_CBC)
|
||||||
|
self._aes = AES(bookkey)
|
||||||
|
encryption = etree.fromstring(encryption)
|
||||||
|
self._encrypted = encrypted = set()
|
||||||
|
expr = './%s/%s/%s' % (enc('EncryptedData'), enc('CipherData'),
|
||||||
|
enc('CipherReference'))
|
||||||
|
for elem in encryption.findall(expr):
|
||||||
|
path = elem.get('URI', None)
|
||||||
|
path = path.encode('utf-8')
|
||||||
|
if path is not None:
|
||||||
|
encrypted.add(path)
|
||||||
|
|
||||||
|
def decompress(self, bytes):
|
||||||
|
dc = zlib.decompressobj(-15)
|
||||||
|
bytes = dc.decompress(bytes)
|
||||||
|
ex = dc.decompress('Z') + dc.flush()
|
||||||
|
if ex:
|
||||||
|
bytes = bytes + ex
|
||||||
|
return bytes
|
||||||
|
|
||||||
|
def decrypt(self, path, data):
|
||||||
|
if path in self._encrypted:
|
||||||
|
data = self._aes.decrypt(data)[16:]
|
||||||
|
data = data[:-ord(data[-1])]
|
||||||
|
data = self.decompress(data)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
|
def __init__(self, root):
|
||||||
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
|
self.status = Tkinter.Label(self, text='Select files for decryption')
|
||||||
|
self.status.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
body = Tkinter.Frame(self)
|
||||||
|
body.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
sticky = Tkconstants.E + Tkconstants.W
|
||||||
|
body.grid_columnconfigure(1, weight=2)
|
||||||
|
Tkinter.Label(body, text='Key file').grid(row=0)
|
||||||
|
self.keypath = Tkinter.Entry(body, width=30)
|
||||||
|
self.keypath.grid(row=0, column=1, sticky=sticky)
|
||||||
|
if os.path.exists('bnepubkey.b64'):
|
||||||
|
self.keypath.insert(0, 'bnepubkey.b64')
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_keypath)
|
||||||
|
button.grid(row=0, column=2)
|
||||||
|
Tkinter.Label(body, text='Input file').grid(row=1)
|
||||||
|
self.inpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.inpath.grid(row=1, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_inpath)
|
||||||
|
button.grid(row=1, column=2)
|
||||||
|
Tkinter.Label(body, text='Output file').grid(row=2)
|
||||||
|
self.outpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.outpath.grid(row=2, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_outpath)
|
||||||
|
button.grid(row=2, column=2)
|
||||||
|
buttons = Tkinter.Frame(self)
|
||||||
|
buttons.pack()
|
||||||
|
botton = Tkinter.Button(
|
||||||
|
buttons, text="Decrypt", width=10, command=self.decrypt)
|
||||||
|
botton.pack(side=Tkconstants.LEFT)
|
||||||
|
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
||||||
|
button = Tkinter.Button(
|
||||||
|
buttons, text="Quit", width=10, command=self.quit)
|
||||||
|
button.pack(side=Tkconstants.RIGHT)
|
||||||
|
|
||||||
|
def get_keypath(self):
|
||||||
|
keypath = tkFileDialog.askopenfilename(
|
||||||
|
parent=None, title='Select B&N EPUB key file',
|
||||||
|
defaultextension='.b64',
|
||||||
|
filetypes=[('base64-encoded files', '.b64'),
|
||||||
|
('All Files', '.*')])
|
||||||
|
if keypath:
|
||||||
|
keypath = os.path.normpath(keypath)
|
||||||
|
self.keypath.delete(0, Tkconstants.END)
|
||||||
|
self.keypath.insert(0, keypath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def get_inpath(self):
|
||||||
|
inpath = tkFileDialog.askopenfilename(
|
||||||
|
parent=None, title='Select B&N-encrypted EPUB file to decrypt',
|
||||||
|
defaultextension='.epub', filetypes=[('EPUB files', '.epub'),
|
||||||
|
('All files', '.*')])
|
||||||
|
if inpath:
|
||||||
|
inpath = os.path.normpath(inpath)
|
||||||
|
self.inpath.delete(0, Tkconstants.END)
|
||||||
|
self.inpath.insert(0, inpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def get_outpath(self):
|
||||||
|
outpath = tkFileDialog.asksaveasfilename(
|
||||||
|
parent=None, title='Select unencrypted EPUB file to produce',
|
||||||
|
defaultextension='.epub', filetypes=[('EPUB files', '.epub'),
|
||||||
|
('All files', '.*')])
|
||||||
|
if outpath:
|
||||||
|
outpath = os.path.normpath(outpath)
|
||||||
|
self.outpath.delete(0, Tkconstants.END)
|
||||||
|
self.outpath.insert(0, outpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def decrypt(self):
|
||||||
|
keypath = self.keypath.get()
|
||||||
|
inpath = self.inpath.get()
|
||||||
|
outpath = self.outpath.get()
|
||||||
|
if not keypath or not os.path.exists(keypath):
|
||||||
|
self.status['text'] = 'Specified key file does not exist'
|
||||||
|
return
|
||||||
|
if not inpath or not os.path.exists(inpath):
|
||||||
|
self.status['text'] = 'Specified input file does not exist'
|
||||||
|
return
|
||||||
|
if not outpath:
|
||||||
|
self.status['text'] = 'Output file not specified'
|
||||||
|
return
|
||||||
|
if inpath == outpath:
|
||||||
|
self.status['text'] = 'Must have different input and output files'
|
||||||
|
return
|
||||||
|
argv = [sys.argv[0], keypath, inpath, outpath]
|
||||||
|
self.status['text'] = 'Decrypting...'
|
||||||
|
try:
|
||||||
|
cli_main(argv)
|
||||||
|
except Exception, e:
|
||||||
|
self.status['text'] = 'Error: ' + str(e)
|
||||||
|
return
|
||||||
|
self.status['text'] = 'File successfully decrypted'
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(keypath, inpath, outpath):
|
||||||
|
with open(keypath, 'rb') as f:
|
||||||
|
keyb64 = f.read()
|
||||||
|
key = keyb64.decode('base64')[:16]
|
||||||
|
# aes = AES.new(key, AES.MODE_CBC)
|
||||||
|
aes = AES(key)
|
||||||
|
|
||||||
|
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
||||||
|
namelist = set(inf.namelist())
|
||||||
|
if 'META-INF/rights.xml' not in namelist or \
|
||||||
|
'META-INF/encryption.xml' not in namelist:
|
||||||
|
raise IGNOBLEError('%s: not an B&N ADEPT EPUB' % (inpath,))
|
||||||
|
for name in META_NAMES:
|
||||||
|
namelist.remove(name)
|
||||||
|
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
||||||
|
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
||||||
|
expr = './/%s' % (adept('encryptedKey'),)
|
||||||
|
bookkey = ''.join(rights.findtext(expr))
|
||||||
|
bookkey = aes.decrypt(bookkey.decode('base64'))
|
||||||
|
bookkey = bookkey[:-ord(bookkey[-1])]
|
||||||
|
encryption = inf.read('META-INF/encryption.xml')
|
||||||
|
decryptor = Decryptor(bookkey[-16:], encryption)
|
||||||
|
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
||||||
|
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
||||||
|
zi = ZipInfo('mimetype', compress_type=ZIP_STORED)
|
||||||
|
outf.writestr(zi, inf.read('mimetype'))
|
||||||
|
for path in namelist:
|
||||||
|
data = inf.read(path)
|
||||||
|
outf.writestr(path, decryptor.decrypt(path, data))
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
||||||
|
return 1
|
||||||
|
keypath, inpath, outpath = argv[1:]
|
||||||
|
return decryptBook(keypath, inpath, outpath)
|
||||||
|
|
||||||
|
|
||||||
|
def gui_main():
    """Launch the Tk decryption window; returns a process exit code."""
    root = Tkinter.Tk()
    if AES is None:
        # No crypto backend: hide the empty root window and explain why.
        root.withdraw()
        tkMessageBox.showerror(
            "Ignoble EPUB Decrypter",
            "This script requires OpenSSL or PyCrypto, which must be installed "
            "separately. Read the top-of-script comment for details.")
        return 1
    root.title('Ignoble EPUB Decrypter')
    # Fixed height, stretchable width.
    root.resizable(True, False)
    root.minsize(300, 0)
    dialog = DecryptionDialog(root)
    dialog.pack(fill=Tkconstants.X, expand=1)
    root.mainloop()
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Arguments present -> batch (CLI) mode; otherwise launch the Tk GUI.
    entry = cli_main if len(sys.argv) > 1 else gui_main
    sys.exit(entry())
|
||||||
@@ -0,0 +1,239 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ignoblekeygen.pyw, version 2.3
|
||||||
|
|
||||||
|
# To run this program install Python 2.6 from <http://www.python.org/download/>
|
||||||
|
# and OpenSSL or PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
||||||
|
# (make sure to install the version for Python 2.6). Save this script file as
|
||||||
|
# ignoblekeygen.pyw and double-click on it to run it.
|
||||||
|
|
||||||
|
# Revision history:
|
||||||
|
# 1 - Initial release
|
||||||
|
# 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
|
||||||
|
# 2.1 - Allow Windows versions of libcrypto to be found
|
||||||
|
# 2.2 - On Windows try PyCrypto first and then OpenSSL next
|
||||||
|
# 2.3 - Modify interface to allow use of import
|
||||||
|
|
||||||
|
"""
|
||||||
|
Generate Barnes & Noble EPUB user key from name and credit card number.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import hashlib
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkFileDialog
|
||||||
|
import tkMessageBox
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# use openssl's libcrypt if it exists in place of pycrypto
|
||||||
|
# code extracted from the Adobe Adept DRM removal code, also by IHeartCabbages
|
||||||
|
class IGNOBLEError(Exception):
    """Raised for any B&N key-generation or DRM-removal failure."""
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto_libcrypto():
|
||||||
|
from ctypes import CDLL, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, cast
|
||||||
|
from ctypes.util import find_library
|
||||||
|
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
libcrypto = find_library('libeay32')
|
||||||
|
else:
|
||||||
|
libcrypto = find_library('crypto')
|
||||||
|
if libcrypto is None:
|
||||||
|
print 'libcrypto not found'
|
||||||
|
raise IGNOBLEError('libcrypto not found')
|
||||||
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
|
||||||
|
('rounds', c_int)]
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
|
||||||
|
[c_char_p, c_int, AES_KEY_p])
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
|
c_int])
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, userkey, iv):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
self._iv = iv
|
||||||
|
key = self._key = AES_KEY()
|
||||||
|
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
|
||||||
|
if rv < 0:
|
||||||
|
raise IGNOBLEError('Failed to initialize AES Encrypt key')
|
||||||
|
|
||||||
|
def encrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
|
||||||
|
if rv == 0:
|
||||||
|
raise IGNOBLEError('AES encryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
return AES
|
||||||
|
|
||||||
|
|
||||||
|
def _load_crypto_pycrypto():
    """Build the AES-CBC encryptor class on top of PyCrypto.

    Raises ImportError when PyCrypto is not installed.
    """
    from Crypto.Cipher import AES as PyCryptoAES

    class AES(object):
        """AES-CBC encryptor with the same interface as the OpenSSL one."""

        def __init__(self, key, iv):
            self._aes = PyCryptoAES.new(key, PyCryptoAES.MODE_CBC, iv)

        def encrypt(self, data):
            return self._aes.encrypt(data)

    return AES
|
||||||
|
|
||||||
|
def _load_crypto():
    """Return the first AES implementation that loads, or None.

    On Windows PyCrypto is preferred; elsewhere OpenSSL is tried first.
    """
    if sys.platform.startswith('win'):
        candidates = (_load_crypto_pycrypto, _load_crypto_libcrypto)
    else:
        candidates = (_load_crypto_libcrypto, _load_crypto_pycrypto)
    for probe in candidates:
        try:
            return probe()
        except (ImportError, IGNOBLEError):
            continue
    return None
|
||||||
|
|
||||||
|
AES = _load_crypto()
|
||||||
|
|
||||||
|
def normalize_name(name):
    """Lower-case *name* and drop space characters (only ' ', not tabs)."""
    return name.lower().replace(' ', '')
|
||||||
|
|
||||||
|
|
||||||
|
def generate_keyfile(name, ccn, outpath):
    """Derive the B&N user key from name and credit-card number.

    Writes the key base64-encoded to *outpath* and returns the raw
    20-byte key.
    """
    # Both inputs are NUL-terminated before hashing, matching B&N's scheme.
    name = normalize_name(name) + '\x00'
    ccn = ccn + '\x00'
    name_sha = hashlib.sha1(name).digest()[:16]
    ccn_sha = hashlib.sha1(ccn).digest()[:16]
    both_sha = hashlib.sha1(name + ccn).digest()
    # AES-CBC: key = hashed CC number, IV = hashed name.
    aes = AES(ccn_sha, name_sha)
    # 20-byte digest + 12 bytes of 0x0c padding = two full AES blocks.
    crypt = aes.encrypt(both_sha + ('\x0c' * 0x0c))
    userkey = hashlib.sha1(crypt).digest()
    with open(outpath, 'wb') as keyfile:
        keyfile.write(userkey.encode('base64'))
    return userkey
|
||||||
|
|
||||||
|
|
||||||
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
|
def __init__(self, root):
|
||||||
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
|
self.status = Tkinter.Label(self, text='Enter parameters')
|
||||||
|
self.status.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
body = Tkinter.Frame(self)
|
||||||
|
body.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
sticky = Tkconstants.E + Tkconstants.W
|
||||||
|
body.grid_columnconfigure(1, weight=2)
|
||||||
|
Tkinter.Label(body, text='Name').grid(row=1)
|
||||||
|
self.name = Tkinter.Entry(body, width=30)
|
||||||
|
self.name.grid(row=1, column=1, sticky=sticky)
|
||||||
|
Tkinter.Label(body, text='CC#').grid(row=2)
|
||||||
|
self.ccn = Tkinter.Entry(body, width=30)
|
||||||
|
self.ccn.grid(row=2, column=1, sticky=sticky)
|
||||||
|
Tkinter.Label(body, text='Output file').grid(row=0)
|
||||||
|
self.keypath = Tkinter.Entry(body, width=30)
|
||||||
|
self.keypath.grid(row=0, column=1, sticky=sticky)
|
||||||
|
self.keypath.insert(0, 'bnepubkey.b64')
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_keypath)
|
||||||
|
button.grid(row=0, column=2)
|
||||||
|
buttons = Tkinter.Frame(self)
|
||||||
|
buttons.pack()
|
||||||
|
botton = Tkinter.Button(
|
||||||
|
buttons, text="Generate", width=10, command=self.generate)
|
||||||
|
botton.pack(side=Tkconstants.LEFT)
|
||||||
|
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
||||||
|
button = Tkinter.Button(
|
||||||
|
buttons, text="Quit", width=10, command=self.quit)
|
||||||
|
button.pack(side=Tkconstants.RIGHT)
|
||||||
|
|
||||||
|
def get_keypath(self):
|
||||||
|
keypath = tkFileDialog.asksaveasfilename(
|
||||||
|
parent=None, title='Select B&N EPUB key file to produce',
|
||||||
|
defaultextension='.b64',
|
||||||
|
filetypes=[('base64-encoded files', '.b64'),
|
||||||
|
('All Files', '.*')])
|
||||||
|
if keypath:
|
||||||
|
keypath = os.path.normpath(keypath)
|
||||||
|
self.keypath.delete(0, Tkconstants.END)
|
||||||
|
self.keypath.insert(0, keypath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def generate(self):
|
||||||
|
name = self.name.get()
|
||||||
|
ccn = self.ccn.get()
|
||||||
|
keypath = self.keypath.get()
|
||||||
|
if not name:
|
||||||
|
self.status['text'] = 'Name not specified'
|
||||||
|
return
|
||||||
|
if not ccn:
|
||||||
|
self.status['text'] = 'Credit card number not specified'
|
||||||
|
return
|
||||||
|
if not keypath:
|
||||||
|
self.status['text'] = 'Output keyfile path not specified'
|
||||||
|
return
|
||||||
|
self.status['text'] = 'Generating...'
|
||||||
|
try:
|
||||||
|
generate_keyfile(name, ccn, keypath)
|
||||||
|
except Exception, e:
|
||||||
|
self.status['text'] = 'Error: ' + str(e)
|
||||||
|
return
|
||||||
|
self.status['text'] = 'Keyfile successfully generated'
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s NAME CC# OUTFILE" % (progname,)
|
||||||
|
return 1
|
||||||
|
name, ccn, outpath = argv[1:]
|
||||||
|
generate_keyfile(name, ccn, outpath)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def gui_main():
    """Launch the Tk key-generator window; returns a process exit code."""
    root = Tkinter.Tk()
    if AES is None:
        # No crypto backend: hide the empty root window and explain why.
        root.withdraw()
        tkMessageBox.showerror(
            "Ignoble EPUB Keyfile Generator",
            "This script requires OpenSSL or PyCrypto, which must be installed "
            "separately. Read the top-of-script comment for details.")
        return 1
    root.title('Ignoble EPUB Keyfile Generator')
    # Fixed height, stretchable width.
    root.resizable(True, False)
    root.minsize(300, 0)
    dialog = DecryptionDialog(root)
    dialog.pack(fill=Tkconstants.X, expand=1)
    root.mainloop()
    return 0
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Arguments present -> batch (CLI) mode; otherwise launch the Tk GUI.
    entry = cli_main if len(sys.argv) > 1 else gui_main
    sys.exit(entry())
|
||||||
@@ -0,0 +1,476 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ineptepub.pyw, version 5.6
|
||||||
|
# Copyright © 2009-2010 i♥cabbages
|
||||||
|
|
||||||
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
|
# later. <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
# Windows users: Before running this program, you must first install Python 2.6
|
||||||
|
# from <http://www.python.org/download/> and PyCrypto from
|
||||||
|
# <http://www.voidspace.org.uk/python/modules.shtml#pycrypto> (make sure to
|
||||||
|
# install the version for Python 2.6). Save this script file as
|
||||||
|
# ineptepub.pyw and double-click on it to run it.
|
||||||
|
#
|
||||||
|
# Mac OS X users: Save this script file as ineptepub.pyw. You can run this
|
||||||
|
# program from the command line (pythonw ineptepub.pyw) or by double-clicking
|
||||||
|
# it when it has been associated with PythonLauncher.
|
||||||
|
|
||||||
|
# Revision history:
|
||||||
|
# 1 - Initial release
|
||||||
|
# 2 - Rename to INEPT, fix exit code
|
||||||
|
# 5 - Version bump to avoid (?) confusion;
|
||||||
|
# Improve OS X support by using OpenSSL when available
|
||||||
|
# 5.1 - Improve OpenSSL error checking
|
||||||
|
# 5.2 - Fix ctypes error causing segfaults on some systems
|
||||||
|
# 5.3 - add support for OpenSSL on Windows, fix bug with some versions of libcrypto 0.9.8 prior to path level o
|
||||||
|
# 5.4 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
|
||||||
|
# 5.5 - On Windows try PyCrypto first, OpenSSL next
|
||||||
|
# 5.6 - Modify interface to allow use with import
|
||||||
|
"""
|
||||||
|
Decrypt Adobe ADEPT-encrypted EPUB books.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import zlib
|
||||||
|
import zipfile
|
||||||
|
from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED
|
||||||
|
from contextlib import closing
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkFileDialog
|
||||||
|
import tkMessageBox
|
||||||
|
|
||||||
|
class ADEPTError(Exception):
    """Raised for any ADEPT DRM-removal failure in this module."""
|
||||||
|
|
||||||
|
def _load_crypto_libcrypto():
|
||||||
|
from ctypes import CDLL, POINTER, c_void_p, c_char_p, c_int, c_long, \
|
||||||
|
Structure, c_ulong, create_string_buffer, cast
|
||||||
|
from ctypes.util import find_library
|
||||||
|
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
libcrypto = find_library('libeay32')
|
||||||
|
else:
|
||||||
|
libcrypto = find_library('crypto')
|
||||||
|
|
||||||
|
if libcrypto is None:
|
||||||
|
raise ADEPTError('libcrypto not found')
|
||||||
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
|
RSA_NO_PADDING = 3
|
||||||
|
AES_MAXNR = 14
|
||||||
|
|
||||||
|
c_char_pp = POINTER(c_char_p)
|
||||||
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
|
class RSA(Structure):
|
||||||
|
pass
|
||||||
|
RSA_p = POINTER(RSA)
|
||||||
|
|
||||||
|
class AES_KEY(Structure):
|
||||||
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
|
||||||
|
('rounds', c_int)]
|
||||||
|
AES_KEY_p = POINTER(AES_KEY)
|
||||||
|
|
||||||
|
def F(restype, name, argtypes):
|
||||||
|
func = getattr(libcrypto, name)
|
||||||
|
func.restype = restype
|
||||||
|
func.argtypes = argtypes
|
||||||
|
return func
|
||||||
|
|
||||||
|
d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey',
|
||||||
|
[RSA_p, c_char_pp, c_long])
|
||||||
|
RSA_size = F(c_int, 'RSA_size', [RSA_p])
|
||||||
|
RSA_private_decrypt = F(c_int, 'RSA_private_decrypt',
|
||||||
|
[c_int, c_char_p, c_char_p, RSA_p, c_int])
|
||||||
|
RSA_free = F(None, 'RSA_free', [RSA_p])
|
||||||
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
|
||||||
|
[c_char_p, c_int, AES_KEY_p])
|
||||||
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
|
c_int])
|
||||||
|
|
||||||
|
class RSA(object):
|
||||||
|
def __init__(self, der):
|
||||||
|
buf = create_string_buffer(der)
|
||||||
|
pp = c_char_pp(cast(buf, c_char_p))
|
||||||
|
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
|
||||||
|
if rsa is None:
|
||||||
|
raise ADEPTError('Error parsing ADEPT user key DER')
|
||||||
|
|
||||||
|
def decrypt(self, from_):
|
||||||
|
rsa = self._rsa
|
||||||
|
to = create_string_buffer(RSA_size(rsa))
|
||||||
|
dlen = RSA_private_decrypt(len(from_), from_, to, rsa,
|
||||||
|
RSA_NO_PADDING)
|
||||||
|
if dlen < 0:
|
||||||
|
raise ADEPTError('RSA decryption failed')
|
||||||
|
return to[:dlen]
|
||||||
|
|
||||||
|
def __del__(self):
|
||||||
|
if self._rsa is not None:
|
||||||
|
RSA_free(self._rsa)
|
||||||
|
self._rsa = None
|
||||||
|
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, userkey):
|
||||||
|
self._blocksize = len(userkey)
|
||||||
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise ADEPTError('AES improper key used')
|
||||||
|
return
|
||||||
|
key = self._key = AES_KEY()
|
||||||
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
|
||||||
|
if rv < 0:
|
||||||
|
raise ADEPTError('Failed to initialize AES key')
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
out = create_string_buffer(len(data))
|
||||||
|
iv = ("\x00" * self._blocksize)
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
|
||||||
|
if rv == 0:
|
||||||
|
raise ADEPTError('AES decryption failed')
|
||||||
|
return out.raw
|
||||||
|
|
||||||
|
return (AES, RSA)
|
||||||
|
|
||||||
|
def _load_crypto_pycrypto():
|
||||||
|
from Crypto.Cipher import AES as _AES
|
||||||
|
from Crypto.PublicKey import RSA as _RSA
|
||||||
|
|
||||||
|
# ASN.1 parsing code from tlslite
|
||||||
|
class ASN1Error(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class ASN1Parser(object):
|
||||||
|
class Parser(object):
|
||||||
|
def __init__(self, bytes):
|
||||||
|
self.bytes = bytes
|
||||||
|
self.index = 0
|
||||||
|
|
||||||
|
def get(self, length):
|
||||||
|
if self.index + length > len(self.bytes):
|
||||||
|
raise ASN1Error("Error decoding ASN.1")
|
||||||
|
x = 0
|
||||||
|
for count in range(length):
|
||||||
|
x <<= 8
|
||||||
|
x |= self.bytes[self.index]
|
||||||
|
self.index += 1
|
||||||
|
return x
|
||||||
|
|
||||||
|
def getFixBytes(self, lengthBytes):
|
||||||
|
bytes = self.bytes[self.index : self.index+lengthBytes]
|
||||||
|
self.index += lengthBytes
|
||||||
|
return bytes
|
||||||
|
|
||||||
|
def getVarBytes(self, lengthLength):
|
||||||
|
lengthBytes = self.get(lengthLength)
|
||||||
|
return self.getFixBytes(lengthBytes)
|
||||||
|
|
||||||
|
def getFixList(self, length, lengthList):
|
||||||
|
l = [0] * lengthList
|
||||||
|
for x in range(lengthList):
|
||||||
|
l[x] = self.get(length)
|
||||||
|
return l
|
||||||
|
|
||||||
|
def getVarList(self, length, lengthLength):
|
||||||
|
lengthList = self.get(lengthLength)
|
||||||
|
if lengthList % length != 0:
|
||||||
|
raise ASN1Error("Error decoding ASN.1")
|
||||||
|
lengthList = int(lengthList/length)
|
||||||
|
l = [0] * lengthList
|
||||||
|
for x in range(lengthList):
|
||||||
|
l[x] = self.get(length)
|
||||||
|
return l
|
||||||
|
|
||||||
|
def startLengthCheck(self, lengthLength):
|
||||||
|
self.lengthCheck = self.get(lengthLength)
|
||||||
|
self.indexCheck = self.index
|
||||||
|
|
||||||
|
def setLengthCheck(self, length):
|
||||||
|
self.lengthCheck = length
|
||||||
|
self.indexCheck = self.index
|
||||||
|
|
||||||
|
def stopLengthCheck(self):
|
||||||
|
if (self.index - self.indexCheck) != self.lengthCheck:
|
||||||
|
raise ASN1Error("Error decoding ASN.1")
|
||||||
|
|
||||||
|
def atLengthCheck(self):
|
||||||
|
if (self.index - self.indexCheck) < self.lengthCheck:
|
||||||
|
return False
|
||||||
|
elif (self.index - self.indexCheck) == self.lengthCheck:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
raise ASN1Error("Error decoding ASN.1")
|
||||||
|
|
||||||
|
def __init__(self, bytes):
|
||||||
|
p = self.Parser(bytes)
|
||||||
|
p.get(1)
|
||||||
|
self.length = self._getASN1Length(p)
|
||||||
|
self.value = p.getFixBytes(self.length)
|
||||||
|
|
||||||
|
def getChild(self, which):
|
||||||
|
p = self.Parser(self.value)
|
||||||
|
for x in range(which+1):
|
||||||
|
markIndex = p.index
|
||||||
|
p.get(1)
|
||||||
|
length = self._getASN1Length(p)
|
||||||
|
p.getFixBytes(length)
|
||||||
|
return ASN1Parser(p.bytes[markIndex:p.index])
|
||||||
|
|
||||||
|
def _getASN1Length(self, p):
|
||||||
|
firstLength = p.get(1)
|
||||||
|
if firstLength<=127:
|
||||||
|
return firstLength
|
||||||
|
else:
|
||||||
|
lengthLength = firstLength & 0x7F
|
||||||
|
return p.get(lengthLength)
|
||||||
|
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, key):
|
||||||
|
self._aes = _AES.new(key, _AES.MODE_CBC)
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
return self._aes.decrypt(data)
|
||||||
|
|
||||||
|
class RSA(object):
|
||||||
|
def __init__(self, der):
|
||||||
|
key = ASN1Parser([ord(x) for x in der])
|
||||||
|
key = [key.getChild(x).value for x in xrange(1, 4)]
|
||||||
|
key = [self.bytesToNumber(v) for v in key]
|
||||||
|
self._rsa = _RSA.construct(key)
|
||||||
|
|
||||||
|
def bytesToNumber(self, bytes):
|
||||||
|
total = 0L
|
||||||
|
for byte in bytes:
|
||||||
|
total = (total << 8) + byte
|
||||||
|
return total
|
||||||
|
|
||||||
|
def decrypt(self, data):
|
||||||
|
return self._rsa.decrypt(data)
|
||||||
|
|
||||||
|
return (AES, RSA)
|
||||||
|
|
||||||
|
def _load_crypto():
    """Return an (AES, RSA) implementation pair, or (None, None).

    On Windows PyCrypto is preferred; elsewhere OpenSSL is tried first.
    """
    if sys.platform.startswith('win'):
        candidates = (_load_crypto_pycrypto, _load_crypto_libcrypto)
    else:
        candidates = (_load_crypto_libcrypto, _load_crypto_pycrypto)
    for probe in candidates:
        try:
            return probe()
        except (ImportError, ADEPTError):
            continue
    return (None, None)
|
||||||
|
AES, RSA = _load_crypto()
|
||||||
|
|
||||||
|
META_NAMES = ('mimetype', 'META-INF/rights.xml', 'META-INF/encryption.xml')
|
||||||
|
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
||||||
|
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
||||||
|
|
||||||
|
class ZipInfo(zipfile.ZipInfo):
    """zipfile.ZipInfo whose constructor accepts a compress_type keyword.

    The stdlib ZipInfo does not take ``compress_type`` in ``__init__``;
    this subclass pops it from the keywords and applies it after base
    initialisation.
    """

    def __init__(self, *args, **kwargs):
        # Pull compress_type out before delegating: the base class would
        # reject it as an unexpected keyword argument.
        compress_type = kwargs.pop('compress_type', None)
        super(ZipInfo, self).__init__(*args, **kwargs)
        # Bug fix: the original referenced an unbound local (NameError)
        # when compress_type was omitted; only override the base-class
        # default when the caller actually supplied a value.
        if compress_type is not None:
            self.compress_type = compress_type
||||||
|
|
||||||
|
class Decryptor(object):
    """Decrypts (and re-inflates) the entries of an ADEPT-encrypted EPUB.

    encryption.xml lists which archive paths are encrypted; everything
    else passes through decrypt() untouched.
    """

    def __init__(self, bookkey, encryption):
        enc = lambda tag: '{%s}%s' % (NSMAP['enc'], tag)
        self._aes = AES(bookkey)
        manifest = etree.fromstring(encryption)
        self._encrypted = encrypted = set()
        # Each CipherReference/@URI names one encrypted archive member.
        expr = './%s/%s/%s' % (enc('EncryptedData'), enc('CipherData'),
                               enc('CipherReference'))
        for ref in manifest.findall(expr):
            uri = ref.get('URI', None)
            if uri is not None:
                encrypted.add(uri.encode('utf-8'))

    def decompress(self, blob):
        """Inflate a raw (headerless, wbits=-15) deflate stream."""
        inflater = zlib.decompressobj(-15)
        blob = inflater.decompress(blob)
        # Feed one dummy byte and flush to drain any buffered tail.
        tail = inflater.decompress('Z') + inflater.flush()
        if tail:
            blob = blob + tail
        return blob

    def decrypt(self, path, data):
        """Return plaintext for *path*; unlisted entries pass through."""
        if path in self._encrypted:
            # Drop the 16-byte IV block, strip CBC padding, then inflate.
            data = self._aes.decrypt(data)[16:]
            data = data[:-ord(data[-1])]
            data = self.decompress(data)
        return data
|
||||||
|
|
||||||
|
|
||||||
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
|
def __init__(self, root):
|
||||||
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
|
self.status = Tkinter.Label(self, text='Select files for decryption')
|
||||||
|
self.status.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
body = Tkinter.Frame(self)
|
||||||
|
body.pack(fill=Tkconstants.X, expand=1)
|
||||||
|
sticky = Tkconstants.E + Tkconstants.W
|
||||||
|
body.grid_columnconfigure(1, weight=2)
|
||||||
|
Tkinter.Label(body, text='Key file').grid(row=0)
|
||||||
|
self.keypath = Tkinter.Entry(body, width=30)
|
||||||
|
self.keypath.grid(row=0, column=1, sticky=sticky)
|
||||||
|
if os.path.exists('adeptkey.der'):
|
||||||
|
self.keypath.insert(0, 'adeptkey.der')
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_keypath)
|
||||||
|
button.grid(row=0, column=2)
|
||||||
|
Tkinter.Label(body, text='Input file').grid(row=1)
|
||||||
|
self.inpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.inpath.grid(row=1, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_inpath)
|
||||||
|
button.grid(row=1, column=2)
|
||||||
|
Tkinter.Label(body, text='Output file').grid(row=2)
|
||||||
|
self.outpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.outpath.grid(row=2, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text="...", command=self.get_outpath)
|
||||||
|
button.grid(row=2, column=2)
|
||||||
|
buttons = Tkinter.Frame(self)
|
||||||
|
buttons.pack()
|
||||||
|
botton = Tkinter.Button(
|
||||||
|
buttons, text="Decrypt", width=10, command=self.decrypt)
|
||||||
|
botton.pack(side=Tkconstants.LEFT)
|
||||||
|
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
||||||
|
button = Tkinter.Button(
|
||||||
|
buttons, text="Quit", width=10, command=self.quit)
|
||||||
|
button.pack(side=Tkconstants.RIGHT)
|
||||||
|
|
||||||
|
def get_keypath(self):
|
||||||
|
keypath = tkFileDialog.askopenfilename(
|
||||||
|
parent=None, title='Select ADEPT key file',
|
||||||
|
defaultextension='.der', filetypes=[('DER-encoded files', '.der'),
|
||||||
|
('All Files', '.*')])
|
||||||
|
if keypath:
|
||||||
|
keypath = os.path.normpath(keypath)
|
||||||
|
self.keypath.delete(0, Tkconstants.END)
|
||||||
|
self.keypath.insert(0, keypath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def get_inpath(self):
|
||||||
|
inpath = tkFileDialog.askopenfilename(
|
||||||
|
parent=None, title='Select ADEPT-encrypted EPUB file to decrypt',
|
||||||
|
defaultextension='.epub', filetypes=[('EPUB files', '.epub'),
|
||||||
|
('All files', '.*')])
|
||||||
|
if inpath:
|
||||||
|
inpath = os.path.normpath(inpath)
|
||||||
|
self.inpath.delete(0, Tkconstants.END)
|
||||||
|
self.inpath.insert(0, inpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def get_outpath(self):
|
||||||
|
outpath = tkFileDialog.asksaveasfilename(
|
||||||
|
parent=None, title='Select unencrypted EPUB file to produce',
|
||||||
|
defaultextension='.epub', filetypes=[('EPUB files', '.epub'),
|
||||||
|
('All files', '.*')])
|
||||||
|
if outpath:
|
||||||
|
outpath = os.path.normpath(outpath)
|
||||||
|
self.outpath.delete(0, Tkconstants.END)
|
||||||
|
self.outpath.insert(0, outpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def decrypt(self):
|
||||||
|
keypath = self.keypath.get()
|
||||||
|
inpath = self.inpath.get()
|
||||||
|
outpath = self.outpath.get()
|
||||||
|
if not keypath or not os.path.exists(keypath):
|
||||||
|
self.status['text'] = 'Specified key file does not exist'
|
||||||
|
return
|
||||||
|
if not inpath or not os.path.exists(inpath):
|
||||||
|
self.status['text'] = 'Specified input file does not exist'
|
||||||
|
return
|
||||||
|
if not outpath:
|
||||||
|
self.status['text'] = 'Output file not specified'
|
||||||
|
return
|
||||||
|
if inpath == outpath:
|
||||||
|
self.status['text'] = 'Must have different input and output files'
|
||||||
|
return
|
||||||
|
argv = [sys.argv[0], keypath, inpath, outpath]
|
||||||
|
self.status['text'] = 'Decrypting...'
|
||||||
|
try:
|
||||||
|
cli_main(argv)
|
||||||
|
except Exception, e:
|
||||||
|
self.status['text'] = 'Error: ' + str(e)
|
||||||
|
return
|
||||||
|
self.status['text'] = 'File successfully decrypted'
|
||||||
|
|
||||||
|
|
||||||
|
def decryptBook(keypath, inpath, outpath):
    """Remove ADEPT DRM from the EPUB at *inpath*.

    keypath -- DER-encoded ADEPT user key file
    inpath  -- encrypted EPUB
    outpath -- decrypted EPUB to write
    Returns 0 on success; raises ADEPTError for non-ADEPT input or a
    bad key.
    """
    with open(keypath, 'rb') as keyfile:
        rsa = RSA(keyfile.read())
    with closing(ZipFile(open(inpath, 'rb'))) as src:
        entries = set(src.namelist())
        required = ('META-INF/rights.xml', 'META-INF/encryption.xml')
        if not all(name in entries for name in required):
            raise ADEPTError('%s: not an ADEPT EPUB' % (inpath,))
        # DRM bookkeeping entries are handled specially, not copied verbatim.
        for meta in META_NAMES:
            entries.remove(meta)
        # The RSA-encrypted session key lives base64-encoded in rights.xml.
        rights = etree.fromstring(src.read('META-INF/rights.xml'))
        adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
        expr = './/%s' % (adept('encryptedKey'),)
        bookkey = ''.join(rights.findtext(expr))
        bookkey = rsa.decrypt(bookkey.decode('base64'))
        # Padded as per RSAES-PKCS1-v1_5: ... 0x00 || 16-byte session key.
        if bookkey[-17] != '\x00':
            raise ADEPTError('problem decrypting session key')
        decryptor = Decryptor(bookkey[-16:],
                              src.read('META-INF/encryption.xml'))
        zipargs = dict(compression=ZIP_DEFLATED, allowZip64=False)
        with closing(ZipFile(open(outpath, 'wb'), 'w', **zipargs)) as dst:
            # EPUB spec: mimetype must come first and be stored uncompressed.
            mimeinfo = ZipInfo('mimetype', compress_type=ZIP_STORED)
            dst.writestr(mimeinfo, src.read('mimetype'))
            for entry in entries:
                dst.writestr(entry, decryptor.decrypt(entry, src.read(entry)))
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
if AES is None:
|
||||||
|
print "%s: This script requires OpenSSL or PyCrypto, which must be" \
|
||||||
|
" installed separately. Read the top-of-script comment for" \
|
||||||
|
" details." % (progname,)
|
||||||
|
return 1
|
||||||
|
if len(argv) != 4:
|
||||||
|
print "usage: %s KEYFILE INBOOK OUTBOOK" % (progname,)
|
||||||
|
return 1
|
||||||
|
keypath, inpath, outpath = argv[1:]
|
||||||
|
return decryptBook(keypath, inpath, outpath)
|
||||||
|
|
||||||
|
|
||||||
|
def gui_main():
    """Launch the Tk decryption window; returns a process exit code."""
    root = Tkinter.Tk()
    if AES is None:
        # No crypto backend: hide the empty root window and explain why.
        root.withdraw()
        tkMessageBox.showerror(
            "INEPT EPUB Decrypter",
            "This script requires OpenSSL or PyCrypto, which must be"
            " installed separately. Read the top-of-script comment for"
            " details.")
        return 1
    root.title('INEPT EPUB Decrypter')
    # Fixed height, stretchable width.
    root.resizable(True, False)
    root.minsize(300, 0)
    dialog = DecryptionDialog(root)
    dialog.pack(fill=Tkconstants.X, expand=1)
    root.mainloop()
    return 0
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Arguments present -> batch (CLI) mode; otherwise launch the Tk GUI.
    entry = cli_main if len(sys.argv) > 1 else gui_main
    sys.exit(entry())
|
||||||
467
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/ineptkey.py
Normal file
467
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/ineptkey.py
Normal file
@@ -0,0 +1,467 @@
|
|||||||
|
#! /usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# ineptkey.pyw, version 5.4
|
||||||
|
# Copyright © 2009-2010 i♥cabbages
|
||||||
|
|
||||||
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
|
# later. <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
# Windows users: Before running this program, you must first install Python 2.6
|
||||||
|
# from <http://www.python.org/download/> and PyCrypto from
|
||||||
|
# <http://www.voidspace.org.uk/python/modules.shtml#pycrypto> (make certain
|
||||||
|
# to install the version for Python 2.6). Then save this script file as
|
||||||
|
# ineptkey.pyw and double-click on it to run it. It will create a file named
|
||||||
|
# adeptkey.der in the same directory. This is your ADEPT user key.
|
||||||
|
#
|
||||||
|
# Mac OS X users: Save this script file as ineptkey.pyw. You can run this
|
||||||
|
# program from the command line (pythonw ineptkey.pyw) or by double-clicking
|
||||||
|
# it when it has been associated with PythonLauncher. It will create a file
|
||||||
|
# named adeptkey.der in the same directory. This is your ADEPT user key.
|
||||||
|
|
||||||
|
# Revision history:
|
||||||
|
# 1 - Initial release, for Adobe Digital Editions 1.7
|
||||||
|
# 2 - Better algorithm for finding pLK; improved error handling
|
||||||
|
# 3 - Rename to INEPT
|
||||||
|
# 4 - Series of changes by joblack (and others?) --
|
||||||
|
# 4.1 - quick beta fix for ADE 1.7.2 (anon)
|
||||||
|
# 4.2 - added old 1.7.1 processing
|
||||||
|
# 4.3 - better key search
|
||||||
|
# 4.4 - Make it working on 64-bit Python
|
||||||
|
# 5 - Clean up and improve 4.x changes;
|
||||||
|
# Clean up and merge OS X support by unknown
|
||||||
|
# 5.1 - add support for using OpenSSL on Windows in place of PyCrypto
|
||||||
|
# 5.2 - added support for output of key to a particular file
|
||||||
|
# 5.3 - On Windows try PyCrypto first, OpenSSL next
|
||||||
|
# 5.4 - Modify interface to allow use of import
|
||||||
|
|
||||||
|
"""
|
||||||
|
Retrieve Adobe ADEPT user key.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import struct
|
||||||
|
import Tkinter
|
||||||
|
import Tkconstants
|
||||||
|
import tkMessageBox
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
class ADEPTError(Exception):
    """Raised for any failure while locating or decoding the ADEPT user key."""
|
||||||
|
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
|
string_at, Structure, c_void_p, cast, c_size_t, memmove, CDLL, c_int, \
|
||||||
|
c_long, c_ulong
|
||||||
|
|
||||||
|
from ctypes.wintypes import LPVOID, DWORD, BOOL
|
||||||
|
import _winreg as winreg
|
||||||
|
|
||||||
|
def _load_crypto_libcrypto():
    """Build an AES-CBC decryptor class backed by OpenSSL's libeay32.dll.

    Raises ADEPTError when the library cannot be found.
    """
    from ctypes.util import find_library
    libpath = find_library('libeay32')
    if libpath is None:
        raise ADEPTError('libcrypto not found')
    crypto = CDLL(libpath)

    AES_MAXNR = 14
    c_char_pp = POINTER(c_char_p)
    c_int_p = POINTER(c_int)

    class AES_KEY(Structure):
        # mirrors OpenSSL's struct aes_key_st
        _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
                    ('rounds', c_int)]
    AES_KEY_p = POINTER(AES_KEY)

    def bind(restype, name, argtypes):
        # fetch a libcrypto symbol and declare its C signature
        fn = getattr(crypto, name)
        fn.restype = restype
        fn.argtypes = argtypes
        return fn

    AES_set_decrypt_key = bind(c_int, 'AES_set_decrypt_key',
                               [c_char_p, c_int, AES_KEY_p])
    AES_cbc_encrypt = bind(None, 'AES_cbc_encrypt',
                           [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
                            c_int])

    class AES(object):
        """AES-CBC decryption with an all-zero IV, via libcrypto."""
        def __init__(self, userkey):
            self._blocksize = len(userkey)
            if self._blocksize not in (16, 24, 32):
                raise ADEPTError('AES improper key used')
            key = self._key = AES_KEY()
            rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
            if rv < 0:
                raise ADEPTError('Failed to initialize AES key')

        def decrypt(self, data):
            out = create_string_buffer(len(data))
            iv = ("\x00" * self._blocksize)
            rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
            if rv == 0:
                raise ADEPTError('AES decryption failed')
            return out.raw

    return AES
|
||||||
|
|
||||||
|
def _load_crypto_pycrypto():
|
||||||
|
from Crypto.Cipher import AES as _AES
|
||||||
|
class AES(object):
|
||||||
|
def __init__(self, key):
|
||||||
|
self._aes = _AES.new(key, _AES.MODE_CBC)
|
||||||
|
def decrypt(self, data):
|
||||||
|
return self._aes.decrypt(data)
|
||||||
|
return AES
|
||||||
|
|
||||||
|
def _load_crypto():
    """Return an AES implementation, preferring PyCrypto over OpenSSL.

    Returns None when neither backend is available.
    """
    for loader in (_load_crypto_pycrypto, _load_crypto_libcrypto):
        try:
            return loader()
        except (ImportError, ADEPTError):
            continue
    return None
|
||||||
|
|
||||||
|
AES = _load_crypto()
|
||||||
|
|
||||||
|
|
||||||
|
DEVICE_KEY_PATH = r'Software\Adobe\Adept\Device'
|
||||||
|
PRIVATE_LICENCE_KEY_PATH = r'Software\Adobe\Adept\Activation'
|
||||||
|
|
||||||
|
MAX_PATH = 255
|
||||||
|
|
||||||
|
kernel32 = windll.kernel32
|
||||||
|
advapi32 = windll.advapi32
|
||||||
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
def GetSystemDirectory():
    """Factory: bind kernel32.GetSystemDirectoryW and return a zero-arg
    callable yielding the Windows system directory path."""
    winfn = kernel32.GetSystemDirectoryW
    winfn.argtypes = [c_wchar_p, c_uint]
    winfn.restype = c_uint
    def GetSystemDirectory():
        out = create_unicode_buffer(MAX_PATH + 1)
        winfn(out, len(out))
        return out.value
    return GetSystemDirectory
GetSystemDirectory = GetSystemDirectory()
|
||||||
|
|
||||||
|
def GetVolumeSerialNumber():
    """Factory: bind kernel32.GetVolumeInformationW and return a callable
    mapping a root path (e.g. 'C:\\') to its volume serial number."""
    winfn = kernel32.GetVolumeInformationW
    winfn.argtypes = [c_wchar_p, c_wchar_p, c_uint,
                      POINTER(c_uint), POINTER(c_uint),
                      POINTER(c_uint), c_wchar_p, c_uint]
    winfn.restype = c_uint
    def GetVolumeSerialNumber(path):
        vsn = c_uint(0)
        # only the serial-number output is requested; other outputs ignored
        winfn(path, None, 0, byref(vsn), None, None, None, 0)
        return vsn.value
    return GetVolumeSerialNumber
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetUserName():
    """Factory: bind advapi32.GetUserNameW and return a zero-arg callable
    yielding the current user name as a byte string."""
    winfn = advapi32.GetUserNameW
    winfn.argtypes = [c_wchar_p, POINTER(c_uint)]
    winfn.restype = c_uint
    def GetUserName():
        buf = create_unicode_buffer(32)
        size = c_uint(len(buf))
        # grow the buffer until the name fits
        while not winfn(buf, byref(size)):
            buf = create_unicode_buffer(len(buf) * 2)
            size.value = len(buf)
        # keep only the low byte of each UTF-16 code unit
        return buf.value.encode('utf-16-le')[::2]
    return GetUserName
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
PAGE_EXECUTE_READWRITE = 0x40
|
||||||
|
MEM_COMMIT = 0x1000
|
||||||
|
MEM_RESERVE = 0x2000
|
||||||
|
|
||||||
|
def VirtualAlloc():
    """Factory wrapping kernel32.VirtualAlloc with defaults that commit a
    fresh read/write/execute region."""
    raw = kernel32.VirtualAlloc
    raw.argtypes = [LPVOID, c_size_t, DWORD, DWORD]
    raw.restype = LPVOID
    def VirtualAlloc(addr, size, alloctype=(MEM_COMMIT | MEM_RESERVE),
                     protect=PAGE_EXECUTE_READWRITE):
        return raw(addr, size, alloctype, protect)
    return VirtualAlloc
VirtualAlloc = VirtualAlloc()
|
||||||
|
|
||||||
|
MEM_RELEASE = 0x8000
|
||||||
|
|
||||||
|
def VirtualFree():
    """Factory wrapping kernel32.VirtualFree, defaulting to MEM_RELEASE."""
    raw = kernel32.VirtualFree
    raw.argtypes = [LPVOID, c_size_t, DWORD]
    raw.restype = BOOL
    def VirtualFree(addr, size=0, freetype=MEM_RELEASE):
        return raw(addr, size, freetype)
    return VirtualFree
VirtualFree = VirtualFree()
|
||||||
|
|
||||||
|
class NativeFunction(object):
    """A buffer of raw machine code made callable as a C function.

    The instruction bytes are copied into an executable page allocated
    with VirtualAlloc and released again when the object is collected.
    """

    def __init__(self, restype, argtypes, insns):
        self._buf = page = VirtualAlloc(None, len(insns))
        memmove(page, insns, len(insns))
        ftype = CFUNCTYPE(restype, *argtypes)
        self._native = ftype(page)

    def __call__(self, *args):
        return self._native(*args)

    def __del__(self):
        # release the executable page exactly once
        if self._buf is not None:
            VirtualFree(self._buf)
            self._buf = None
|
||||||
|
|
||||||
|
if struct.calcsize("P") == 4:
|
||||||
|
CPUID0_INSNS = (
|
||||||
|
"\x53" # push %ebx
|
||||||
|
"\x31\xc0" # xor %eax,%eax
|
||||||
|
"\x0f\xa2" # cpuid
|
||||||
|
"\x8b\x44\x24\x08" # mov 0x8(%esp),%eax
|
||||||
|
"\x89\x18" # mov %ebx,0x0(%eax)
|
||||||
|
"\x89\x50\x04" # mov %edx,0x4(%eax)
|
||||||
|
"\x89\x48\x08" # mov %ecx,0x8(%eax)
|
||||||
|
"\x5b" # pop %ebx
|
||||||
|
"\xc3" # ret
|
||||||
|
)
|
||||||
|
CPUID1_INSNS = (
|
||||||
|
"\x53" # push %ebx
|
||||||
|
"\x31\xc0" # xor %eax,%eax
|
||||||
|
"\x40" # inc %eax
|
||||||
|
"\x0f\xa2" # cpuid
|
||||||
|
"\x5b" # pop %ebx
|
||||||
|
"\xc3" # ret
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
CPUID0_INSNS = (
|
||||||
|
"\x49\x89\xd8" # mov %rbx,%r8
|
||||||
|
"\x49\x89\xc9" # mov %rcx,%r9
|
||||||
|
"\x48\x31\xc0" # xor %rax,%rax
|
||||||
|
"\x0f\xa2" # cpuid
|
||||||
|
"\x4c\x89\xc8" # mov %r9,%rax
|
||||||
|
"\x89\x18" # mov %ebx,0x0(%rax)
|
||||||
|
"\x89\x50\x04" # mov %edx,0x4(%rax)
|
||||||
|
"\x89\x48\x08" # mov %ecx,0x8(%rax)
|
||||||
|
"\x4c\x89\xc3" # mov %r8,%rbx
|
||||||
|
"\xc3" # retq
|
||||||
|
)
|
||||||
|
CPUID1_INSNS = (
|
||||||
|
"\x53" # push %rbx
|
||||||
|
"\x48\x31\xc0" # xor %rax,%rax
|
||||||
|
"\x48\xff\xc0" # inc %rax
|
||||||
|
"\x0f\xa2" # cpuid
|
||||||
|
"\x5b" # pop %rbx
|
||||||
|
"\xc3" # retq
|
||||||
|
)
|
||||||
|
|
||||||
|
def cpuid0():
    """Factory: return a zero-arg callable giving the 12-byte CPU vendor
    string produced by CPUID leaf 0."""
    native = NativeFunction(None, [c_char_p], CPUID0_INSNS)
    scratch = create_string_buffer(12)
    def cpuid0():
        native(scratch)
        return scratch.raw
    return cpuid0
cpuid0 = cpuid0()
|
||||||
|
|
||||||
|
cpuid1 = NativeFunction(c_uint, [], CPUID1_INSNS)
|
||||||
|
|
||||||
|
class DataBlob(Structure):
    """Win32 DATA_BLOB: a length-prefixed byte buffer used by crypt32."""
    _fields_ = [('cbData', c_uint),
                ('pbData', c_void_p)]
DataBlob_p = POINTER(DataBlob)
|
||||||
|
|
||||||
|
def CryptUnprotectData():
    """Factory: bind crypt32.CryptUnprotectData and return a callable that
    decrypts DPAPI-protected bytes using the given extra entropy."""
    raw = crypt32.CryptUnprotectData
    raw.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
                    c_void_p, c_void_p, c_uint, DataBlob_p]
    raw.restype = c_uint
    def CryptUnprotectData(indata, entropy):
        inbuf = create_string_buffer(indata)
        inblob = DataBlob(len(indata), cast(inbuf, c_void_p))
        entbuf = create_string_buffer(entropy)
        entblob = DataBlob(len(entropy), cast(entbuf, c_void_p))
        outblob = DataBlob()
        if not raw(byref(inblob), None, byref(entblob),
                   None, None, 0, byref(outblob)):
            raise ADEPTError("Failed to decrypt user key key (sic)")
        return string_at(outblob.pbData, outblob.cbData)
    return CryptUnprotectData
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
|
def retrieve_key(keypath):
    """Derive the ADEPT user key from the Windows registry and write the
    raw key bytes to keypath.

    Returns True on success, False when no AES backend is available.
    Raises ADEPTError when ADE activation data cannot be found.
    """
    if AES is None:
        tkMessageBox.showerror(
            "ADEPT Key",
            "This script requires PyCrypto or OpenSSL which must be installed "
            "separately. Read the top-of-script comment for details.")
        return False
    # Recreate the machine-specific entropy ADE uses with DPAPI:
    # volume serial + CPU vendor + CPU signature + user name.
    root = GetSystemDirectory().split('\\')[0] + '\\'
    serial = GetVolumeSerialNumber(root)
    vendor = cpuid0()
    signature = struct.pack('>I', cpuid1())[1:]
    user = GetUserName()
    entropy = struct.pack('>I12s3s13s', serial, vendor, signature, user)
    cuser = winreg.HKEY_CURRENT_USER
    try:
        regkey = winreg.OpenKey(cuser, DEVICE_KEY_PATH)
    except WindowsError:
        raise ADEPTError("Adobe Digital Editions not activated")
    device = winreg.QueryValueEx(regkey, 'key')[0]
    keykey = CryptUnprotectData(device, entropy)
    userkey = None
    try:
        plkroot = winreg.OpenKey(cuser, PRIVATE_LICENCE_KEY_PATH)
    except WindowsError:
        raise ADEPTError("Could not locate ADE activation")
    # Walk the activation tree: <NNNN:credentials>/<NNNN:privateLicenseKey>
    for i in xrange(0, 16):
        try:
            plkparent = winreg.OpenKey(plkroot, "%04d" % (i,))
        except WindowsError:
            break
        if winreg.QueryValueEx(plkparent, None)[0] != 'credentials':
            continue
        for j in xrange(0, 16):
            try:
                plkkey = winreg.OpenKey(plkparent, "%04d" % (j,))
            except WindowsError:
                break
            if winreg.QueryValueEx(plkkey, None)[0] != 'privateLicenseKey':
                continue
            userkey = winreg.QueryValueEx(plkkey, 'value')[0]
            break
        if userkey is not None:
            break
    if userkey is None:
        raise ADEPTError('Could not locate privateLicenseKey')
    # Decrypt with the device key, then strip the 26-byte header and the
    # trailing padding (last byte gives the pad length).
    userkey = userkey.decode('base64')
    userkey = AES(keykey).decrypt(userkey)
    userkey = userkey[26:-ord(userkey[-1])]
    with open(keypath, 'wb') as f:
        f.write(userkey)
    return True
|
||||||
|
|
||||||
|
elif sys.platform.startswith('darwin'):
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import Carbon.File
|
||||||
|
import Carbon.Folder
|
||||||
|
import Carbon.Folders
|
||||||
|
import MacOS
|
||||||
|
|
||||||
|
ACTIVATION_PATH = 'Adobe/Digital Editions/activation.dat'
|
||||||
|
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
||||||
|
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
||||||
|
|
||||||
|
def find_folder(domain, dtype):
    """Resolve a Carbon folder (domain, type) to a path string.

    Returns None when the folder does not exist.
    """
    try:
        ref = Carbon.Folder.FSFindFolder(domain, dtype, False)
        return Carbon.File.pathname(ref)
    except MacOS.Error:
        return None
|
||||||
|
|
||||||
|
def find_app_support_file(subpath):
    """Search the user then local Application Support folders for subpath.

    Returns the first existing file path, or None.
    """
    dtype = Carbon.Folders.kApplicationSupportFolderType
    for domain in (Carbon.Folders.kUserDomain, Carbon.Folders.kLocalDomain):
        base = find_folder(domain, dtype)
        if base is None:
            continue
        candidate = os.path.join(base, subpath)
        if os.path.isfile(candidate):
            return candidate
    return None
|
||||||
|
|
||||||
|
def retrieve_key(keypath):
    """Extract the ADEPT user key from ADE's activation.dat on Mac OS X
    and write the raw key bytes (26-byte header stripped) to keypath."""
    actpath = find_app_support_file(ACTIVATION_PATH)
    if actpath is None:
        raise ADEPTError("Could not locate ADE activation")
    tree = etree.parse(actpath)
    adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
    expr = '//%s/%s' % (adept('credentials'), adept('privateLicenseKey'))
    userkey = tree.findtext(expr)
    userkey = userkey.decode('base64')
    userkey = userkey[26:]
    with open(keypath, 'wb') as f:
        f.write(userkey)
    return True
|
||||||
|
|
||||||
|
elif sys.platform.startswith('cygwin'):
|
||||||
|
def retrieve_key(keypath):
    """Stub for Cygwin: key extraction needs a Windows-native Python."""
    tkMessageBox.showerror(
        "ADEPT Key",
        "This script requires a Windows-native Python, and cannot be run "
        "under Cygwin. Please install a Windows-native Python and/or "
        "check your file associations.")
    return False
|
||||||
|
|
||||||
|
else:
|
||||||
|
def retrieve_key(keypath):
    """Stub for unsupported platforms (e.g. Linux): always fails."""
    tkMessageBox.showerror(
        "ADEPT Key",
        "This script only supports Windows and Mac OS X. For Linux "
        "you should be able to run ADE and this script under Wine (with "
        "an appropriate version of Windows Python installed).")
    return False
|
||||||
|
|
||||||
|
class ExceptionDialog(Tkinter.Frame):
    """Frame that displays an unexpected traceback in a text widget."""

    def __init__(self, root, text):
        Tkinter.Frame.__init__(self, root, border=5)
        header = Tkinter.Label(self, text="Unexpected error:",
                               anchor=Tkconstants.W, justify=Tkconstants.LEFT)
        header.pack(fill=Tkconstants.X, expand=0)
        self.text = Tkinter.Text(self)
        self.text.pack(fill=Tkconstants.BOTH, expand=1)
        self.text.insert(Tkconstants.END, text)
|
||||||
|
|
||||||
|
|
||||||
|
def extractKeyfile(keypath):
|
||||||
|
try:
|
||||||
|
success = retrieve_key(keypath)
|
||||||
|
except ADEPTError, e:
|
||||||
|
print "Key generation Error: " + str(e)
|
||||||
|
return 1
|
||||||
|
except Exception, e:
|
||||||
|
print "General Error: " + str(e)
|
||||||
|
return 1
|
||||||
|
if not success:
|
||||||
|
return 1
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def cli_main(argv=sys.argv):
    """CLI entry point: write the key to the path given as argv[1]."""
    return extractKeyfile(argv[1])
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
root = Tkinter.Tk()
|
||||||
|
root.withdraw()
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
keypath = 'adeptkey.der'
|
||||||
|
success = False
|
||||||
|
try:
|
||||||
|
success = retrieve_key(keypath)
|
||||||
|
except ADEPTError, e:
|
||||||
|
tkMessageBox.showerror("ADEPT Key", "Error: " + str(e))
|
||||||
|
except Exception:
|
||||||
|
root.wm_state('normal')
|
||||||
|
root.title('ADEPT Key')
|
||||||
|
text = traceback.format_exc()
|
||||||
|
ExceptionDialog(root, text).pack(fill=Tkconstants.BOTH, expand=1)
|
||||||
|
root.mainloop()
|
||||||
|
if not success:
|
||||||
|
return 1
|
||||||
|
tkMessageBox.showinfo(
|
||||||
|
"ADEPT Key", "Key successfully retrieved to %s" % (keypath))
|
||||||
|
return 0
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
if len(sys.argv) > 1:
|
||||||
|
sys.exit(cli_main())
|
||||||
|
sys.exit(main())
|
||||||
2245
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/ineptpdf.py
Normal file
2245
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/ineptpdf.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,212 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
||||||
|
# for personal use for archiving and converting your ebooks
|
||||||
|
|
||||||
|
# PLEASE DO NOT PIRATE EBOOKS!
|
||||||
|
|
||||||
|
# We want all authors and publishers, and eBook stores to live
|
||||||
|
# long and prosperous lives but at the same time we just want to
|
||||||
|
# be able to read OUR books on whatever device we want and to keep
|
||||||
|
# readable for a long, long time
|
||||||
|
|
||||||
|
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
||||||
|
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
||||||
|
# and many many others
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '4.0'
|
||||||
|
|
||||||
|
class Unbuffered:
    """File-object proxy that flushes the wrapped stream after every write."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        self.stream.flush()

    def __getattr__(self, attr):
        # delegate every other attribute to the wrapped stream
        return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os, csv, getopt
|
||||||
|
import string
|
||||||
|
import re
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
buildXML = False
|
||||||
|
|
||||||
|
class DrmException(Exception):
    """Raised when DRM removal cannot proceed."""
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.k4mobidedrm import mobidedrm
|
||||||
|
from calibre_plugins.k4mobidedrm import topazextract
|
||||||
|
from calibre_plugins.k4mobidedrm import kgenpids
|
||||||
|
else:
|
||||||
|
import mobidedrm
|
||||||
|
import topazextract
|
||||||
|
import kgenpids
|
||||||
|
|
||||||
|
|
||||||
|
# cleanup bytestring filenames
|
||||||
|
# borrowed from calibre from calibre/src/calibre/__init__.py
|
||||||
|
# added in removal of non-printing chars
|
||||||
|
# and removal of . at start
|
||||||
|
# convert spaces to underscores
|
||||||
|
def cleanup_name(name):
    """Sanitise a book title into a safe cross-platform filename.

    Drops non-printable characters, replaces reserved characters and
    runs of '..' with '_', avoids a leading or trailing dot, and turns
    spaces into underscores.
    """
    bad_chars = re.compile(r'[\xae\0\\|\?\*<":>\+/]')
    sub = '_'
    cleaned = ''.join(ch for ch in name if ch in string.printable)
    cleaned = bad_chars.sub(sub, cleaned)
    cleaned = re.sub(r'\s', ' ', cleaned).strip()
    cleaned = re.sub(r'^\.+$', '_', cleaned)
    cleaned = cleaned.replace('..', sub)
    # Windows doesn't like path components that end with a period
    if cleaned.endswith('.'):
        cleaned = cleaned[:-1] + sub
    # Mac and Unix don't like file names that begin with a full stop
    if cleaned and cleaned[0] == '.':
        cleaned = sub + cleaned[1:]
    return cleaned.replace(' ', '_')
|
||||||
|
|
||||||
|
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
|
||||||
|
global buildXML
|
||||||
|
|
||||||
|
# handle the obvious cases at the beginning
|
||||||
|
if not os.path.isfile(infile):
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
mobi = True
|
||||||
|
magic3 = file(infile,'rb').read(3)
|
||||||
|
if magic3 == 'TPZ':
|
||||||
|
mobi = False
|
||||||
|
|
||||||
|
bookname = os.path.splitext(os.path.basename(infile))[0]
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
mb = mobidedrm.MobiBook(infile)
|
||||||
|
else:
|
||||||
|
mb = topazextract.TopazBook(infile)
|
||||||
|
|
||||||
|
title = mb.getBookTitle()
|
||||||
|
print "Processing Book: ", title
|
||||||
|
filenametitle = cleanup_name(title)
|
||||||
|
outfilename = bookname
|
||||||
|
if len(outfilename)<=8 or len(filenametitle)<=8:
|
||||||
|
outfilename = outfilename + "_" + filenametitle
|
||||||
|
elif outfilename[:8] != filenametitle[:8]:
|
||||||
|
outfilename = outfilename[:8] + "_" + filenametitle
|
||||||
|
|
||||||
|
# avoid excessively long file names
|
||||||
|
if len(outfilename)>150:
|
||||||
|
outfilename = outfilename[:150]
|
||||||
|
|
||||||
|
# build pid list
|
||||||
|
md1, md2 = mb.getPIDMetaInfo()
|
||||||
|
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
mb.processBook(pidlst)
|
||||||
|
|
||||||
|
except mobidedrm.DrmException, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except topazextract.TpzDRMError, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
except Exception, e:
|
||||||
|
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if mobi:
|
||||||
|
if mb.getPrintReplica():
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
|
||||||
|
else:
|
||||||
|
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
|
||||||
|
mb.getMobiFile(outfile)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# topaz:
|
||||||
|
print " Creating NoDRM HTMLZ Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
|
||||||
|
mb.getHTMLZip(zipname)
|
||||||
|
|
||||||
|
print " Creating SVG ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
|
||||||
|
mb.getSVGZip(zipname)
|
||||||
|
|
||||||
|
if buildXML:
|
||||||
|
print " Creating XML ZIP Archive"
|
||||||
|
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
|
||||||
|
mb.getXMLZip(zipname)
|
||||||
|
|
||||||
|
# remove internal temporary directory of Topaz pieces
|
||||||
|
mb.cleanup()
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def usage(progname):
|
||||||
|
print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
|
||||||
|
print "Usage:"
|
||||||
|
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main
|
||||||
|
#
|
||||||
|
def main(argv=sys.argv):
|
||||||
|
progname = os.path.basename(argv[0])
|
||||||
|
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = []
|
||||||
|
serials = []
|
||||||
|
pids = []
|
||||||
|
|
||||||
|
print ('K4MobiDeDrm v%(__version__)s '
|
||||||
|
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
|
||||||
|
|
||||||
|
try:
|
||||||
|
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
|
||||||
|
except getopt.GetoptError, err:
|
||||||
|
print str(err)
|
||||||
|
usage(progname)
|
||||||
|
sys.exit(2)
|
||||||
|
if len(args)<2:
|
||||||
|
usage(progname)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
for o, a in opts:
|
||||||
|
if o == "-k":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -k")
|
||||||
|
kInfoFiles.append(a)
|
||||||
|
if o == "-p":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -p")
|
||||||
|
pids = a.split(',')
|
||||||
|
if o == "-s":
|
||||||
|
if a == None :
|
||||||
|
raise DrmException("Invalid parameter for -s")
|
||||||
|
serials = a.split(',')
|
||||||
|
|
||||||
|
# try with built in Kindle Info files
|
||||||
|
k4 = True
|
||||||
|
if sys.platform.startswith('linux'):
|
||||||
|
k4 = False
|
||||||
|
kInfoFiles = None
|
||||||
|
infile = args[0]
|
||||||
|
outdir = args[1]
|
||||||
|
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.stdout=Unbuffered(sys.stdout)
|
||||||
|
sys.exit(main())
|
||||||
726
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/k4mutils.py
Normal file
726
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/k4mutils.py
Normal file
@@ -0,0 +1,726 @@
|
|||||||
|
# standlone set of Mac OSX specific routines needed for KindleBooks
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
|
import subprocess
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
|
class DrmException(Exception):
    """Raised for any failure in the Kindle key/crypto helper routines."""
|
||||||
|
|
||||||
|
|
||||||
|
# interface to needed routines in openssl's libcrypto
|
||||||
|
def _load_crypto_libcrypto():
    """Build the LibCrypto class, wrapping OpenSSL's AES-CBC decryption
    and PKCS5_PBKDF2_HMAC_SHA1 key derivation via ctypes.

    Raises DrmException when libcrypto cannot be found.
    """
    from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
        Structure, c_ulong, create_string_buffer, addressof, string_at, cast
    from ctypes.util import find_library

    libpath = find_library('crypto')
    if libpath is None:
        raise DrmException('libcrypto not found')
    libcrypto = CDLL(libpath)

    AES_MAXNR = 14
    c_char_pp = POINTER(c_char_p)
    c_int_p = POINTER(c_int)

    class AES_KEY(Structure):
        # mirrors OpenSSL's struct aes_key_st
        _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
    AES_KEY_p = POINTER(AES_KEY)

    def bind(restype, name, argtypes):
        # declare the C signature of a libcrypto symbol
        fn = getattr(libcrypto, name)
        fn.restype = restype
        fn.argtypes = argtypes
        return fn

    # void AES_cbc_encrypt(in, out, length, key, ivec, enc)
    AES_cbc_encrypt = bind(None, 'AES_cbc_encrypt',
                           [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
    # int AES_set_decrypt_key(userKey, bits, key)
    AES_set_decrypt_key = bind(c_int, 'AES_set_decrypt_key',
                               [c_char_p, c_int, AES_KEY_p])
    # int PKCS5_PBKDF2_HMAC_SHA1(pass, passlen, salt, saltlen, iter, keylen, out)
    PKCS5_PBKDF2_HMAC_SHA1 = bind(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
        [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])

    class LibCrypto(object):
        """Stateful AES-CBC decryptor plus PBKDF2 key/IV generation."""
        def __init__(self):
            self._blocksize = 0
            self._keyctx = None
            self._iv = 0

        def set_decrypt_key(self, userkey, iv):
            self._blocksize = len(userkey)
            if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
                raise DrmException('AES improper key used')
                return
            keyctx = self._keyctx = AES_KEY()
            self._iv = iv
            self._userkey = userkey
            rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
            if rv < 0:
                raise DrmException('Failed to initialize AES key')

        def decrypt(self, data):
            out = create_string_buffer(len(data))
            # the IV buffer is mutated by AES_cbc_encrypt, so copy it
            mutable_iv = create_string_buffer(self._iv, len(self._iv))
            rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
            if rv == 0:
                raise DrmException('AES decryption failed')
            return out.raw

        def keyivgen(self, passwd, salt, iter, keylen):
            out = create_string_buffer(keylen)
            PKCS5_PBKDF2_HMAC_SHA1(passwd, len(passwd), salt, len(salt),
                                   iter, keylen, out)
            return out.raw

    return LibCrypto
|
||||||
|
|
||||||
|
def _load_crypto():
    """Load the libcrypto wrapper; return None when OpenSSL is unavailable."""
    try:
        return _load_crypto_libcrypto()
    except (ImportError, DrmException):
        return None
|
||||||
|
|
||||||
|
LibCrypto = _load_crypto()
|
||||||
|
|
||||||
|
#
|
||||||
|
# Utility Routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
    """Return the raw MD5 digest of message."""
    return hashlib.md5(message).digest()

def SHA1(message):
    """Return the raw SHA-1 digest of message."""
    return hashlib.sha1(message).digest()

def SHA256(message):
    """Return the raw SHA-256 digest of message."""
    return hashlib.sha256(message).digest()
|
||||||
|
|
||||||
|
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
|
||||||
|
|
||||||
|
# For kinf approach of K4Mac 1.6.X or later
|
||||||
|
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
|
||||||
|
# For Mac they seem to re-use charMap2 here
|
||||||
|
charMap5 = charMap2
|
||||||
|
|
||||||
|
# new in K4M 1.9.X
|
||||||
|
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
|
||||||
|
|
||||||
|
|
||||||
|
def encode(data, map):
    """Encode each byte of data as two characters drawn from map.

    The first character carries the xor-obfuscated quotient, the second
    the plain remainder of the byte value modulo len(map).
    """
    out = []
    for ch in data:
        value = ord(ch)
        out.append(map[(value ^ 0x80) // len(map)])
        out.append(map[value % len(map)])
    return "".join(out)
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
    """MD5-hash data, then encode the digest with the characters in map."""
    return encode(MD5(data), map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
    """Decode a two-characters-per-byte string produced by encode().

    Stops at the first character not present in map.  Returns the raw
    decoded bytes (a str under Python 2, bytes under Python 3).
    """
    # b"" (identical to "" on Python 2) so concatenating struct.pack's
    # bytes result also works on Python 3, where "" + bytes raises.
    result = b""
    for i in range(0, len(data) - 1, 2):
        high = map.find(data[i])
        low = map.find(data[i + 1])
        if (high == -1) or (low == -1):
            break
        # invert encode(): high carried the xor-ed quotient, low the remainder
        value = (((high * len(map)) ^ 0x80) & 0xFF) + low
        result += pack("B", value)
    return result
|
||||||
|
|
||||||
|
# For K4M 1.6.X and later
|
||||||
|
# generate table of prime number less than or equal to int n
|
||||||
|
def primes(n):
    """Return the list of all primes <= n via an odd-only Eratosthenes sieve.

    Fixed to use list(range(...)) and floor division so the function keeps
    working under Python 3; under Python 2 the results are unchanged.
    """
    if n == 2:
        return [2]
    elif n < 2:
        return []
    # s holds the odd candidates 3, 5, ..., n; composites are zeroed out
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            # cross out odd multiples of m starting at m*m
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i = i + 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
# uses a sub process to get the Hard Drive Serial Number using ioreg
# returns with the serial number of drive whose BSD Name is "disk0"
def GetVolumeSerialNumber():
    # debug/test override: allow the serial to be injected via environment
    sernum = os.getenv('MYSERIALNUMBER')
    if sernum != None:
        return sernum
    # dump the AHCI disk driver entries from the IO registry
    cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    # NOTE(review): Python 2 semantics assumed — communicate() returns str here
    reslst = out1.split('\n')
    cnt = len(reslst)
    bsdname = None
    sernum = None
    foundIt = False
    for j in xrange(cnt):
        resline = reslst[j]
        # a '"Serial Number" = "..."' line precedes the matching
        # '"BSD Name" = "..."' line, so remember the last serial seen
        pp = resline.find('"Serial Number" = "')
        if pp >= 0:
            sernum = resline[pp+19:-1]   # drop prefix and closing quote
            sernum = sernum.strip()
        bb = resline.find('"BSD Name" = "')
        if bb >= 0:
            bsdname = resline[bb+14:-1]
            bsdname = bsdname.strip()
            # only the primary drive (disk0) is wanted
            if (bsdname == 'disk0') and (sernum != None):
                foundIt = True
                break
    if not foundIt:
        sernum = ''
    return sernum
|
||||||
|
|
||||||
|
def GetUserHomeAppSupKindleDirParitionName():
    """Return the /dev partition name (e.g. 'disk0s2') whose mount point
    contains ~/Library/Application Support/Kindle, or '' if none matches.

    Parses the output of /sbin/mount; when several mount points prefix the
    Kindle path, the last matching line wins (same as the original loop).

    Cleanup: removed the unused ``foundIt`` flag and the ``xrange``/index
    indirection of the original — behavior is unchanged.
    """
    home = os.getenv('HOME')
    dpath = home + '/Library/Application Support/Kindle'
    cmdline = '/sbin/mount'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    disk = ''
    for resline in out1.split('\n'):
        # mount lines look like: "/dev/disk0s2 on / (hfs, local, journaled)"
        if resline.startswith('/dev'):
            (devpart, mpath) = resline.split(' on ')
            dpart = devpart[5:]          # strip the leading '/dev/'
            pp = mpath.find('(')
            if pp >= 0:
                mpath = mpath[:pp-1]     # drop the trailing "(options)" text
            if dpath.startswith(mpath):
                disk = dpart
    return disk
|
||||||
|
|
||||||
|
# uses a sub process to get the UUID of the specified disk partition using ioreg
def GetDiskPartitionUUID(diskpart):
    # debug/test override via environment
    uuidnum = os.getenv('MYUUIDNUMBER')
    if uuidnum != None:
        return uuidnum
    cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    # NOTE(review): Python 2 semantics assumed — communicate() returns str here
    reslst = out1.split('\n')
    cnt = len(reslst)
    bsdname = None
    uuidnum = None
    foundIt = False
    # Track brace nesting of the ioreg property-list dump so that a
    # "UUID" line and a "BSD Name" line are only paired when they sit at
    # the same nesting depth (i.e. belong to the same registry entry).
    nest = 0
    uuidnest = -1      # nesting depth of the most recent UUID line
    partnest = -2      # nesting depth of the BSD Name matching diskpart
    for j in xrange(cnt):
        resline = reslst[j]
        if resline.find('{') >= 0:
            nest += 1
        if resline.find('}') >= 0:
            nest -= 1
        pp = resline.find('"UUID" = "')
        if pp >= 0:
            uuidnum = resline[pp+10:-1]   # drop prefix and closing quote
            uuidnum = uuidnum.strip()
            uuidnest = nest
            # UUID seen after the matching BSD Name at the same depth
            if partnest == uuidnest and uuidnest > 0:
                foundIt = True
                break
        bb = resline.find('"BSD Name" = "')
        if bb >= 0:
            bsdname = resline[bb+14:-1]
            bsdname = bsdname.strip()
            if (bsdname == diskpart):
                partnest = nest
            else :
                partnest = -2
            # BSD Name seen after the matching UUID at the same depth
            if partnest == uuidnest and partnest > 0:
                foundIt = True
                break
        # leaving the current registry entry: reset per-entry state
        if nest == 0:
            partnest = -2
            uuidnest = -1
            uuidnum = None
            bsdname = None
    if not foundIt:
        uuidnum = ''
    return uuidnum
|
||||||
|
|
||||||
|
def GetMACAddressMunged():
    """Return the munged MAC address of interface en0 as 12 hex digits,
    or '' when it cannot be determined.

    Munging mirrors what the Kindle app does: each byte is xored with
    0xa5 and bytes 3 and 4 are swapped.

    Bug fix: the original assigned ``fountIt = False`` (typo) on a
    malformed MAC, creating a dead local instead of clearing ``foundIt``;
    it only worked by accident because foundIt was still False.
    """
    # debug/test override via environment
    macnum = os.getenv('MYMACNUM')
    if macnum != None:
        return macnum
    cmdline = '/sbin/ifconfig en0'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p.communicate()
    reslst = out1.split('\n')
    macnum = None
    foundIt = False
    for resline in reslst:
        pp = resline.find('ether ')
        if pp >= 0:
            macnum = resline[pp+6:-1]
            macnum = macnum.strip()
            # munge it the way the Kindle app does:
            # xor with 0xa5 and swap elements 3 and 4
            maclst = macnum.split(':')
            if len(maclst) != 6:
                foundIt = False   # was 'fountIt' (typo) in the original
                break
            for i in range(6):
                maclst[i] = int('0x' + maclst[i], 0)
            mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
            mlst[5] = maclst[5] ^ 0xa5
            mlst[4] = maclst[3] ^ 0xa5
            mlst[3] = maclst[4] ^ 0xa5
            mlst[2] = maclst[2] ^ 0xa5
            mlst[1] = maclst[1] ^ 0xa5
            mlst[0] = maclst[0] ^ 0xa5
            macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
            foundIt = True
            break
    if not foundIt:
        macnum = ''
    return macnum
|
||||||
|
|
||||||
|
|
||||||
|
# uses unix env to get username instead of using sysctlbyname
def GetUserName():
    """Return the current login name as reported by the USER env var."""
    return os.getenv('USER')
|
||||||
|
|
||||||
|
def isNewInstall():
    """True when the K4Mac 1.9+ marker file (.pes2011) exists under $HOME."""
    # soccer game fan anyone
    marker = os.getenv('HOME') + '/Library/Application Support/Kindle/storage/.pes2011'
    return os.path.exists(marker)
|
||||||
|
|
||||||
|
|
||||||
|
def GetIDString():
    # K4Mac uses an extensive set of machine id strings when encoding pids
    # and when creating unique passwords for its own version of
    # CryptUnprotectDataV2/V3.  The app also detects debuggers and changes
    # which string it picks, so the preference order below matters.
    #
    # For new installs (K4M 1.9+) the munged MAC address is tried first.
    if isNewInstall():
        mungedmac = GetMACAddressMunged()
        if len(mungedmac) > 7:
            return mungedmac
    # otherwise fall back in order: drive serial, partition UUID, munged MAC
    sernum = GetVolumeSerialNumber()
    if len(sernum) > 7:
        return sernum
    diskpart = GetUserHomeAppSupKindleDirParitionName()
    uuidnum = GetDiskPartitionUUID(diskpart)
    if len(uuidnum) > 7:
        return uuidnum
    mungedmac = GetMACAddressMunged()
    if len(mungedmac) > 7:
        return mungedmac
    # nothing usable found — fixed fallback id
    return '9999999999'
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
    def __init__(self):
        # password is derived from the drive serial and the login name
        sernum = GetVolumeSerialNumber()
        if sernum == '':
            sernum = '9999999999'
        sp = sernum + '!@#' + GetUserName()
        passwdData = encode(SHA256(sp),charMap1)
        salt = '16743'
        # LibCrypto is provided elsewhere in this plugin (alfcrypto wrapper)
        self.crp = LibCrypto()
        iter = 0x3e8      # 1000 PBKDF2 iterations
        keylen = 0x80
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """AES-decrypt then de-obfuscate (charMap1) a protected blob."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext,charMap1)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0
class CryptUnprotectDataV2(object):
    def __init__(self):
        # password is derived from the login name and the machine id string
        sp = GetUserName() + ':&%:' + GetIDString()
        passwdData = encode(SHA256(sp),charMap5)
        # salt generation as per the code
        salt = 0x0512981d * 2 * 1 * 1
        salt = str(salt) + GetUserName()
        salt = encode(salt,charMap5)
        self.crp = LibCrypto()
        iter = 0x800      # 2048 PBKDF2 iterations
        keylen = 0x400
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """AES-decrypt then de-obfuscate (charMap5) a protected blob."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext, charMap5)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
    # fixed password/salt pair — the header is not tied to the machine
    passwdData = 'header_key_data'
    salt = 'HEADER.2011'
    iter = 0x80
    keylen = 0x100
    crp = LibCrypto()
    key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
    key = key_iv[0:32]
    iv = key_iv[32:48]
    crp.set_decrypt_key(key,iv)
    cleartext = crp.decrypt(encryptedData)
    return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
    def __init__(self, entropy):
        # password derived from login name and machine id; the per-file
        # entropy (from the .kinf2011 header) is used as the PBKDF2 salt
        sp = GetUserName() + '+@#$%+' + GetIDString()
        passwdData = encode(SHA256(sp),charMap2)
        salt = entropy
        self.crp = LibCrypto()
        iter = 0x800
        keylen = 0x400
        key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
        self.key = key_iv[0:32]
        self.iv = key_iv[32:48]
        self.crp.set_decrypt_key(self.key, self.iv)

    def decrypt(self, encryptedData):
        """AES-decrypt then de-obfuscate (charMap2) a protected blob."""
        cleartext = self.crp.decrypt(encryptedData)
        cleartext = decode(cleartext, charMap2)
        return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# Locate the .kindle-info files
def _appendKindleInfoMatches(home, pattern, kInfoFiles):
    """Run `find` under ~/Library/Application Support for *pattern* and
    append every matching regular file to kInfoFiles.  Returns True when
    at least one file was appended."""
    cmdline = 'find "' + home + '/Library/Application Support" -name "' + pattern + '"'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p1.communicate()
    found = False
    for resline in out1.split('\n'):
        if os.path.isfile(resline):
            kInfoFiles.append(resline)
            found = True
    return found

def getKindleInfoFiles(kInfoFiles):
    """Append every K4Mac info file found under the user's home to
    kInfoFiles and return it.  Prints a notice when nothing was found.

    Cleanup: the original duplicated the same find/parse loop three times
    and left an unused ``kinfopath`` local; this version factors the loop
    into _appendKindleInfoMatches with identical search patterns/order.
    """
    home = os.getenv('HOME')
    found = False
    # current .kindle-info files, then .rainier*-kinf, then .kinf2011
    for pattern in ('.kindle-info', '.rainier*-kinf', '.kinf2011'):
        if _appendKindleInfoMatches(home, pattern, kInfoFiles):
            found = True
    if not found:
        print('No kindle-info files have been found.')
    return kInfoFiles
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
# database of keynames and values
def getDBfromFile(kInfoFile):
    """Parse a K4Mac info file (any of the three known formats) and return
    a dict of keyname -> decrypted value, or None when nothing decrypts.
    """
    # well-known key names; anything else is stored under its raw hash
    names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
    DB = {}
    cnt = 0
    infoReader = open(kInfoFile, 'r')
    hdr = infoReader.read(1)     # first byte distinguishes the formats
    data = infoReader.read()

    if data.find('[') != -1 :
        # older style kindle-info file: '['-separated "hash:data" items
        cud = CryptUnprotectData()
        items = data.split('[')
        for item in items:
            if item != '':
                keyhash, rawdata = item.split(':')
                keyname = "unknown"
                for name in names:
                    if encodeHash(name,charMap2) == keyhash:
                        keyname = name
                        break
                if keyname == "unknown":
                    keyname = keyhash
                encryptedValue = decode(rawdata,charMap2)
                cleartext = cud.decrypt(encryptedValue)
                DB[keyname] = cleartext
                cnt = cnt + 1
        if cnt == 0:
            DB = None
        return DB

    if hdr == '/':
        # newer style .kinf file used by K4Mac >= 1.6.0
        # the .kinf file uses "/" to separate it into records;
        # remove the trailing "/" to make split() clean
        data = data[:-1]
        items = data.split('/')
        cud = CryptUnprotectDataV2()

        # loop through the item records until all are processed
        while len(items) > 0:
            item = items.pop(0)

            # first 32 chars of a group's first record is the MD5 hash of
            # the key name encoded by charMap5
            keyhash = item[0:32]
            keyname = "unknown"

            # ("entropy" from the keyhash is only used on K4PC, not here)

            # remainder of the first record (after the ':' at offset 33)
            # decodes to the number of data records that follow
            srcnt = decode(item[34:],charMap5)
            rcnt = int(srcnt)

            # collect the rcnt records that make up the contents value
            edlst = []
            for i in xrange(rcnt):
                item = items.pop(0)
                edlst.append(item)

            keyname = "unknown"
            for name in names:
                if encodeHash(name,charMap5) == keyhash:
                    keyname = name
                    break
            if keyname == "unknown":
                keyname = keyhash

            # The encoded contents had an odd-length prefix rotated to the
            # end to defeat naive charMap5 decoding.  The split offset is:
            #   len(contents) - largest prime <= int(len(contents)/3)
            # i.e. roughly 2/3 of the way through.  Undo the rotation:
            encdata = "".join(edlst)
            contlen = len(encdata)
            noffset = contlen - primes(int(contlen/3))[-1]
            pfx = encdata[0:noffset]
            encdata = encdata[noffset:]
            encdata = encdata + pfx

            # decode using charMap5 to get the CryptProtect data
            encryptedValue = decode(encdata,charMap5)
            cleartext = cud.decrypt(encryptedValue)
            DB[keyname] = cleartext
            cnt = cnt + 1

        if cnt == 0:
            DB = None
        return DB

    # the latest .kinf2011 version for K4M 1.9.1
    # put back the hdr char, it is needed
    data = hdr + data
    data = data[:-1]
    items = data.split('/')

    # the headerblob is the encrypted information needed to build the
    # entropy string for CryptUnprotectDataV3
    headerblob = items.pop(0)
    encryptedValue = decode(headerblob, charMap1)
    cleartext = UnprotectHeaderData(encryptedValue)

    # extract Version/Build/Cksum/Guid; unlike K4PC, the build number is
    # scaled by 735 (0x2df) when forming the entropy
    pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
    for m in re.finditer(pattern, cleartext):
        entropy = str(int(m.group(2)) * 0x2df) + m.group(4)

    cud = CryptUnprotectDataV3(entropy)

    # loop through the item records until all are processed
    while len(items) > 0:
        item = items.pop(0)

        # first 32 chars of a group's first record is the MD5 hash of the
        # key name (encoded with testMap8 in this format)
        keyhash = item[0:32]
        keyname = "unknown"

        # remainder of the first record decodes to the record count
        srcnt = decode(item[34:],charMap5)
        rcnt = int(srcnt)

        # collect the rcnt records that make up the contents value
        edlst = []
        for i in xrange(rcnt):
            item = items.pop(0)
            edlst.append(item)

        keyname = "unknown"
        for name in names:
            if encodeHash(name,testMap8) == keyhash:
                keyname = name
                break
        if keyname == "unknown":
            keyname = keyhash

        # undo the same prefix rotation as above (split offset based on
        # the largest prime <= len/3), then decode with testMap8
        encdata = "".join(edlst)
        contlen = len(encdata)
        noffset = contlen - primes(int(contlen/3))[-1]
        pfx = encdata[0:noffset]
        encdata = encdata[noffset:]
        encdata = encdata + pfx

        encryptedValue = decode(encdata,testMap8)
        cleartext = cud.decrypt(encryptedValue)
        DB[keyname] = cleartext
        cnt = cnt + 1

    if cnt == 0:
        DB = None
    return DB
|
||||||
@@ -0,0 +1,425 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# K4PC Windows specific routines
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
|
||||||
|
import sys, os, re
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
|
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
|
||||||
|
create_unicode_buffer, create_string_buffer, CFUNCTYPE, addressof, \
|
||||||
|
string_at, Structure, c_void_p, cast
|
||||||
|
|
||||||
|
import _winreg as winreg
|
||||||
|
MAX_PATH = 255
|
||||||
|
kernel32 = windll.kernel32
|
||||||
|
advapi32 = windll.advapi32
|
||||||
|
crypt32 = windll.crypt32
|
||||||
|
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
    """Return the raw 16-byte MD5 digest of *message*."""
    return hashlib.md5(message).digest()
|
||||||
|
|
||||||
|
def SHA1(message):
    """Return the raw 20-byte SHA-1 digest of *message*."""
    return hashlib.sha1(message).digest()
|
||||||
|
|
||||||
|
def SHA256(message):
    """Return the raw 32-byte SHA-256 digest of *message*."""
    return hashlib.sha256(message).digest()
|
||||||
|
|
||||||
|
# For K4PC 1.9.X
|
||||||
|
# use routines in alfcrypto:
|
||||||
|
# AES_cbc_encrypt
|
||||||
|
# AES_set_decrypt_key
|
||||||
|
# PKCS5_PBKDF2_HMAC_SHA1
|
||||||
|
|
||||||
|
from alfcrypto import AES_CBC, KeyIVGen
|
||||||
|
|
||||||
|
def UnprotectHeaderData(encryptedData):
    """Decrypt the .kinf2011 header blob (K4PC >= 1.9.0).

    Uses the fixed password/salt pair below, so the header is not tied to
    the machine; AES_CBC/KeyIVGen come from the bundled alfcrypto module.
    """
    passwdData = 'header_key_data'
    salt = 'HEADER.2011'
    iter = 0x80
    keylen = 0x100
    key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
    key = key_iv[0:32]
    iv = key_iv[32:48]
    aes = AES_CBC()
    aes.set_decrypt_key(key, iv)
    cleartext = aes.decrypt(encryptedData)
    return cleartext
|
||||||
|
|
||||||
|
|
||||||
|
# simple primes table (<= n) calculator
def primes(n):
    """Return the list of all primes <= n.

    Sieve of Eratosthenes over the odd candidates only.  Uses an explicit
    list and // floor division so the function behaves identically on
    Python 2 and Python 3 — the original relied on Python 2's mutable
    range() result and integer '/', and raised TypeError under Python 3.
    """
    if n == 2:
        return [2]
    elif n < 2:
        return []
    # s holds the odd candidates 3,5,7,...; composites are zeroed in place
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            # cross out multiples of m, starting at m*m
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i = i + 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]
|
||||||
|
|
||||||
|
|
||||||
|
# Various character maps used to decrypt kindle info values.
# Probably supposed to act as obfuscation
# Each is a 64-character (testMap1/testMap6: 32-character) alphabet used
# by encode()/decode() as a positional digit set.
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# New maps in K4PC 1.9.0
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
||||||
|
class DrmException(Exception):
    """Generic error type for DRM-removal failures in this module."""
    pass
|
||||||
|
|
||||||
|
# Encode the bytes in data with the characters in map
def encode(data, map):
    """Obfuscate *data*: each input byte becomes two characters of *map*."""
    n = len(map)
    out = []
    for ch in data:
        b = ord(ch)
        # high digit carries the xor-0x80 obfuscation; low digit is plain
        out.append(map[(b ^ 0x80) // n])
        out.append(map[b % n])
    return "".join(out)
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data, map):
    """Return the encode()-obfuscated MD5 digest of *data*."""
    hashed = MD5(data)
    return encode(hashed, map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
def decode(data,map):
    """Inverse of encode(): map character pairs back into raw bytes.

    NOTE(review): Python 2 only — appends struct.pack() output (a str in
    Py2) onto a str accumulator; under Python 3 this would raise TypeError.
    """
    result = ""
    # consume the input two map-characters at a time
    for i in range (0,len(data)-1,2):
        high = map.find(data[i])
        low = map.find(data[i+1])
        # stop at the first character that is not part of the map
        if (high == -1) or (low == -1) :
            break
        # reverse of encode(): the high digit carries the xor-0x80 obfuscation
        value = (((high * len(map)) ^ 0x80) & 0xFF) + low
        result += pack("B",value)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
# interface with Windows OS Routines
class DataBlob(Structure):
    # Mirrors the Win32 DATA_BLOB struct used by the CryptoAPI:
    # cbData is the byte count, pbData a raw pointer to the bytes.
    _fields_ = [('cbData', c_uint),
                ('pbData', c_void_p)]
# pointer type passed to CryptUnprotectData below
DataBlob_p = POINTER(DataBlob)
|
||||||
|
|
||||||
|
|
||||||
|
def GetSystemDirectory():
    # Factory pattern: bind and type the Win32 entry point once, return the
    # real function, then the trailing assignment replaces this factory
    # with it at module load time.
    GetSystemDirectoryW = kernel32.GetSystemDirectoryW
    GetSystemDirectoryW.argtypes = [c_wchar_p, c_uint]
    GetSystemDirectoryW.restype = c_uint
    def GetSystemDirectory():
        # MAX_PATH is the module-level buffer limit (255)
        buffer = create_unicode_buffer(MAX_PATH + 1)
        GetSystemDirectoryW(buffer, len(buffer))
        return buffer.value
    return GetSystemDirectory
GetSystemDirectory = GetSystemDirectory()
|
||||||
|
|
||||||
|
def GetVolumeSerialNumber():
    # Factory pattern (see GetSystemDirectory): bind the Win32 call once.
    GetVolumeInformationW = kernel32.GetVolumeInformationW
    GetVolumeInformationW.argtypes = [c_wchar_p, c_wchar_p, c_uint,
                                      POINTER(c_uint), POINTER(c_uint),
                                      POINTER(c_uint), c_wchar_p, c_uint]
    GetVolumeInformationW.restype = c_uint
    # NOTE: the default path (system drive root, e.g. u'C:\\') is computed
    # once at definition time, not per call.
    def GetVolumeSerialNumber(path = GetSystemDirectory().split('\\')[0] + '\\'):
        vsn = c_uint(0)
        GetVolumeInformationW(path, None, 0, byref(vsn), None, None, None, 0)
        # returned as a decimal string for use in key derivation
        return str(vsn.value)
    return GetVolumeSerialNumber
GetVolumeSerialNumber = GetVolumeSerialNumber()
|
||||||
|
|
||||||
|
def GetIDString():
    """On Windows, K4PC's machine id is simply the volume serial number."""
    serial = GetVolumeSerialNumber()
    return serial
|
||||||
|
|
||||||
|
def getLastError():
    # Factory pattern: bind kernel32.GetLastError once and expose it as a
    # plain function (the trailing assignment swaps the factory out).
    GetLastError = kernel32.GetLastError
    GetLastError.argtypes = None
    GetLastError.restype = c_uint
    def getLastError():
        return GetLastError()
    return getLastError
getLastError = getLastError()
|
||||||
|
|
||||||
|
def GetUserName():
    # Factory pattern: bind advapi32.GetUserNameW once.
    GetUserNameW = advapi32.GetUserNameW
    GetUserNameW.argtypes = [c_wchar_p, POINTER(c_uint)]
    GetUserNameW.restype = c_uint
    def GetUserName():
        # Start with a deliberately tiny buffer and double it until the
        # call succeeds (GetUserNameW fails when the buffer is too small).
        buffer = create_unicode_buffer(2)
        size = c_uint(len(buffer))
        while not GetUserNameW(buffer, byref(size)):
            errcd = getLastError()
            if errcd == 234:
                # bad wine implementation up through wine 1.3.21
                return "AlternateUserName"
            buffer = create_unicode_buffer(len(buffer) * 2)
            size.value = len(buffer)
        # squeeze the UTF-16 name down to its low bytes (8-bit str)
        return buffer.value.encode('utf-16-le')[::2]
    return GetUserName
GetUserName = GetUserName()
|
||||||
|
|
||||||
|
def CryptUnprotectData():
    # Factory pattern: bind the Win32 DPAPI entry point once.
    _CryptUnprotectData = crypt32.CryptUnprotectData
    _CryptUnprotectData.argtypes = [DataBlob_p, c_wchar_p, DataBlob_p,
                                    c_void_p, c_void_p, c_uint, DataBlob_p]
    _CryptUnprotectData.restype = c_uint
    def CryptUnprotectData(indata, entropy, flags):
        # wrap input bytes and entropy in DATA_BLOB structs for the API
        indatab = create_string_buffer(indata)
        indata = DataBlob(len(indata), cast(indatab, c_void_p))
        entropyb = create_string_buffer(entropy)
        entropy = DataBlob(len(entropy), cast(entropyb, c_void_p))
        outdata = DataBlob()
        if not _CryptUnprotectData(byref(indata), None, byref(entropy),
                                   None, None, flags, byref(outdata)):
            # callers test for the sentinel string rather than an exception
            # raise DrmException("Failed to Unprotect Data")
            return 'failed'
        return string_at(outdata.pbData, outdata.cbData)
    return CryptUnprotectData
CryptUnprotectData = CryptUnprotectData()
|
||||||
|
|
||||||
|
|
||||||
|
# Locate all of the kindle-info style files and return as list
def getKindleInfoFiles(kInfoFiles):
    """Append every existing K4PC info-file location to kInfoFiles and
    return it.  Prints a per-format notice when a location is absent.

    Fixes: the original notices were double negatives ("No kindle.info
    files have not been found."); corrected to "...have been found.".
    The four near-identical checks are also folded into one loop.
    """
    regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\")
    path = winreg.QueryValueEx(regkey, 'Local AppData')[0]

    # (candidate path, message when missing) — oldest format first:
    # kindle.info, then K4PC 1.5.X, 1.6.X and 1.9.X .kinf locations
    candidates = [
        (path + '\\Amazon\\Kindle For PC\\{AMAwzsaPaaZAzmZzZQzgZCAkZ3AjA_AY}\\kindle.info',
         'No kindle.info files have been found.'),
        (path + '\\Amazon\\Kindle For PC\\storage\\rainier.2.1.1.kinf',
         'No K4PC 1.5.X .kinf files have been found.'),
        (path + '\\Amazon\\Kindle\\storage\\rainier.2.1.1.kinf',
         'No K4PC 1.6.X .kinf files have been found.'),
        (path + '\\Amazon\\Kindle\\storage\\.kinf2011',
         'No K4PC 1.9.X .kinf files have been found.'),
    ]
    for kinfopath, missing_msg in candidates:
        if os.path.isfile(kinfopath):
            kInfoFiles.append(kinfopath)
        else:
            print(missing_msg)

    return kInfoFiles
|
||||||
|
|
||||||
|
|
||||||
|
# determine type of kindle info provided and return a
|
||||||
|
# database of keynames and values
|
||||||
|
def getDBfromFile(kInfoFile):
|
||||||
|
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
|
||||||
|
DB = {}
|
||||||
|
cnt = 0
|
||||||
|
infoReader = open(kInfoFile, 'r')
|
||||||
|
hdr = infoReader.read(1)
|
||||||
|
data = infoReader.read()
|
||||||
|
|
||||||
|
if data.find('{') != -1 :
|
||||||
|
|
||||||
|
# older style kindle-info file
|
||||||
|
items = data.split('{')
|
||||||
|
for item in items:
|
||||||
|
if item != '':
|
||||||
|
keyhash, rawdata = item.split(':')
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap2) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
encryptedValue = decode(rawdata,charMap2)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, "", 0)
|
||||||
|
cnt = cnt + 1
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
if hdr == '/':
|
||||||
|
# else rainier-2-1-1 .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
data = data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the raw keyhash string is used to create entropy for the actual
|
||||||
|
# CryptProtectData Blob that represents that keys contents
|
||||||
|
entropy = SHA1(keyhash)
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,charMap5) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
if keyname == "unknown":
|
||||||
|
keyname = keyhash
|
||||||
|
# the charMap5 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using charMap5 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the charMap5 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by charMap5
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
|
||||||
|
# now properly split and recombine
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using Map5 to get the CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,charMap5)
|
||||||
|
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
|
|
||||||
|
# else newest .kinf2011 style .kinf file
|
||||||
|
# the .kinf file uses "/" to separate it into records
|
||||||
|
# so remove the trailing "/" to make it easy to use split
|
||||||
|
# need to put back the first char read because it it part
|
||||||
|
# of the added entropy blob
|
||||||
|
data = hdr + data[:-1]
|
||||||
|
items = data.split('/')
|
||||||
|
|
||||||
|
# starts with and encoded and encrypted header blob
|
||||||
|
headerblob = items.pop(0)
|
||||||
|
encryptedValue = decode(headerblob, testMap1)
|
||||||
|
cleartext = UnprotectHeaderData(encryptedValue)
|
||||||
|
# now extract the pieces that form the added entropy
|
||||||
|
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
|
||||||
|
for m in re.finditer(pattern, cleartext):
|
||||||
|
added_entropy = m.group(2) + m.group(4)
|
||||||
|
|
||||||
|
|
||||||
|
# loop through the item records until all are processed
|
||||||
|
while len(items) > 0:
|
||||||
|
|
||||||
|
# get the first item record
|
||||||
|
item = items.pop(0)
|
||||||
|
|
||||||
|
# the first 32 chars of the first record of a group
|
||||||
|
# is the MD5 hash of the key name encoded by charMap5
|
||||||
|
keyhash = item[0:32]
|
||||||
|
|
||||||
|
# the sha1 of raw keyhash string is used to create entropy along
|
||||||
|
# with the added entropy provided above from the headerblob
|
||||||
|
entropy = SHA1(keyhash) + added_entropy
|
||||||
|
|
||||||
|
# the remainder of the first record when decoded with charMap5
|
||||||
|
# has the ':' split char followed by the string representation
|
||||||
|
# of the number of records that follow
|
||||||
|
# and make up the contents
|
||||||
|
srcnt = decode(item[34:],charMap5)
|
||||||
|
rcnt = int(srcnt)
|
||||||
|
|
||||||
|
# read and store in rcnt records of data
|
||||||
|
# that make up the contents value
|
||||||
|
edlst = []
|
||||||
|
for i in xrange(rcnt):
|
||||||
|
item = items.pop(0)
|
||||||
|
edlst.append(item)
|
||||||
|
|
||||||
|
# key names now use the new testMap8 encoding
|
||||||
|
keyname = "unknown"
|
||||||
|
for name in names:
|
||||||
|
if encodeHash(name,testMap8) == keyhash:
|
||||||
|
keyname = name
|
||||||
|
break
|
||||||
|
|
||||||
|
# the testMap8 encoded contents data has had a length
|
||||||
|
# of chars (always odd) cut off of the front and moved
|
||||||
|
# to the end to prevent decoding using testMap8 from
|
||||||
|
# working properly, and thereby preventing the ensuing
|
||||||
|
# CryptUnprotectData call from succeeding.
|
||||||
|
|
||||||
|
# The offset into the testMap8 encoded contents seems to be:
|
||||||
|
# len(contents)-largest prime number <= int(len(content)/3)
|
||||||
|
# (in other words split "about" 2/3rds of the way through)
|
||||||
|
|
||||||
|
# move first offsets chars to end to align for decode by testMap8
|
||||||
|
# by moving noffset chars from the start of the
|
||||||
|
# string to the end of the string
|
||||||
|
encdata = "".join(edlst)
|
||||||
|
contlen = len(encdata)
|
||||||
|
noffset = contlen - primes(int(contlen/3))[-1]
|
||||||
|
pfx = encdata[0:noffset]
|
||||||
|
encdata = encdata[noffset:]
|
||||||
|
encdata = encdata + pfx
|
||||||
|
|
||||||
|
# decode using new testMap8 to get the original CryptProtect Data
|
||||||
|
encryptedValue = decode(encdata,testMap8)
|
||||||
|
cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
|
||||||
|
DB[keyname] = cleartext
|
||||||
|
cnt = cnt + 1
|
||||||
|
|
||||||
|
if cnt == 0:
|
||||||
|
DB = None
|
||||||
|
return DB
|
||||||
270
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/kgenpids.py
Normal file
270
DeDRM_Windows_Application/DeDRM_WinApp/DeDRM_lib/lib/kgenpids.py
Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
import sys
|
||||||
|
import os, csv
|
||||||
|
import binascii
|
||||||
|
import zlib
|
||||||
|
import re
|
||||||
|
from struct import pack, unpack, unpack_from
|
||||||
|
|
||||||
|
class DrmException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
global charMap1
|
||||||
|
global charMap3
|
||||||
|
global charMap4
|
||||||
|
|
||||||
|
if 'calibre' in sys.modules:
|
||||||
|
inCalibre = True
|
||||||
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from calibre_plugins.k4mobidedrm.k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
else:
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
from k4pcutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
if sys.platform.startswith('darwin'):
|
||||||
|
from k4mutils import getKindleInfoFiles, getDBfromFile, GetUserName, GetIDString
|
||||||
|
|
||||||
|
|
||||||
|
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
|
||||||
|
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
|
||||||
|
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
|
|
||||||
|
# crypto digestroutines
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def MD5(message):
|
||||||
|
ctx = hashlib.md5()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
def SHA1(message):
|
||||||
|
ctx = hashlib.sha1()
|
||||||
|
ctx.update(message)
|
||||||
|
return ctx.digest()
|
||||||
|
|
||||||
|
|
||||||
|
# Encode the bytes in data with the characters in map
|
||||||
|
def encode(data, map):
|
||||||
|
result = ""
|
||||||
|
for char in data:
|
||||||
|
value = ord(char)
|
||||||
|
Q = (value ^ 0x80) // len(map)
|
||||||
|
R = value % len(map)
|
||||||
|
result += map[Q]
|
||||||
|
result += map[R]
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Hash the bytes in data and then encode the digest with the characters in map
|
||||||
|
def encodeHash(data,map):
|
||||||
|
return encode(MD5(data),map)
|
||||||
|
|
||||||
|
# Decode the string in data with the characters in map. Returns the decoded bytes
|
||||||
|
def decode(data,map):
|
||||||
|
result = ""
|
||||||
|
for i in range (0,len(data)-1,2):
|
||||||
|
high = map.find(data[i])
|
||||||
|
low = map.find(data[i+1])
|
||||||
|
if (high == -1) or (low == -1) :
|
||||||
|
break
|
||||||
|
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
|
||||||
|
result += pack("B",value)
|
||||||
|
return result
|
||||||
|
|
||||||
|
#
|
||||||
|
# PID generation routines
|
||||||
|
#
|
||||||
|
|
||||||
|
# Returns two bit at offset from a bit field
|
||||||
|
def getTwoBitsFromBitField(bitField,offset):
|
||||||
|
byteNumber = offset // 4
|
||||||
|
bitPosition = 6 - 2*(offset % 4)
|
||||||
|
return ord(bitField[byteNumber]) >> bitPosition & 3
|
||||||
|
|
||||||
|
# Returns the six bits at offset from a bit field
|
||||||
|
def getSixBitsFromBitField(bitField,offset):
|
||||||
|
offset *= 3
|
||||||
|
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
|
||||||
|
return value
|
||||||
|
|
||||||
|
# 8 bits to six bits encoding from hash to generate PID string
|
||||||
|
def encodePID(hash):
|
||||||
|
global charMap3
|
||||||
|
PID = ""
|
||||||
|
for position in range (0,8):
|
||||||
|
PID += charMap3[getSixBitsFromBitField(hash,position)]
|
||||||
|
return PID
|
||||||
|
|
||||||
|
# Encryption table used to generate the device PID
|
||||||
|
def generatePidEncryptionTable() :
|
||||||
|
table = []
|
||||||
|
for counter1 in range (0,0x100):
|
||||||
|
value = counter1
|
||||||
|
for counter2 in range (0,8):
|
||||||
|
if (value & 1 == 0) :
|
||||||
|
value = value >> 1
|
||||||
|
else :
|
||||||
|
value = value >> 1
|
||||||
|
value = value ^ 0xEDB88320
|
||||||
|
table.append(value)
|
||||||
|
return table
|
||||||
|
|
||||||
|
# Seed value used to generate the device PID
|
||||||
|
def generatePidSeed(table,dsn) :
|
||||||
|
value = 0
|
||||||
|
for counter in range (0,4) :
|
||||||
|
index = (ord(dsn[counter]) ^ value) &0xFF
|
||||||
|
value = (value >> 8) ^ table[index]
|
||||||
|
return value
|
||||||
|
|
||||||
|
# Generate the device PID
|
||||||
|
def generateDevicePID(table,dsn,nbRoll):
|
||||||
|
global charMap4
|
||||||
|
seed = generatePidSeed(table,dsn)
|
||||||
|
pidAscii = ""
|
||||||
|
pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
|
||||||
|
index = 0
|
||||||
|
for counter in range (0,nbRoll):
|
||||||
|
pid[index] = pid[index] ^ ord(dsn[counter])
|
||||||
|
index = (index+1) %8
|
||||||
|
for counter in range (0,8):
|
||||||
|
index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
|
||||||
|
pidAscii += charMap4[index]
|
||||||
|
return pidAscii
|
||||||
|
|
||||||
|
def crc32(s):
|
||||||
|
return (~binascii.crc32(s,-1))&0xFFFFFFFF
|
||||||
|
|
||||||
|
# convert from 8 digit PID to 10 digit PID with checksum
|
||||||
|
def checksumPid(s):
|
||||||
|
global charMap4
|
||||||
|
crc = crc32(s)
|
||||||
|
crc = crc ^ (crc >> 16)
|
||||||
|
res = s
|
||||||
|
l = len(charMap4)
|
||||||
|
for i in (0,1):
|
||||||
|
b = crc & 0xff
|
||||||
|
pos = (b // l) ^ (b % l)
|
||||||
|
res += charMap4[pos%l]
|
||||||
|
crc >>= 8
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
# old kindle serial number to fixed pid
|
||||||
|
def pidFromSerial(s, l):
|
||||||
|
global charMap4
|
||||||
|
crc = crc32(s)
|
||||||
|
arr1 = [0]*l
|
||||||
|
for i in xrange(len(s)):
|
||||||
|
arr1[i%l] ^= ord(s[i])
|
||||||
|
crc_bytes = [crc >> 24 & 0xff, crc >> 16 & 0xff, crc >> 8 & 0xff, crc & 0xff]
|
||||||
|
for i in xrange(l):
|
||||||
|
arr1[i] ^= crc_bytes[i&3]
|
||||||
|
pid = ""
|
||||||
|
for i in xrange(l):
|
||||||
|
b = arr1[i] & 0xff
|
||||||
|
pid+=charMap4[(b >> 7) + ((b >> 5 & 3) ^ (b & 0x1f))]
|
||||||
|
return pid
|
||||||
|
|
||||||
|
|
||||||
|
# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
|
||||||
|
def getKindlePid(pidlst, rec209, token, serialnum):
|
||||||
|
# Compute book PID
|
||||||
|
pidHash = SHA1(serialnum+rec209+token)
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
bookPID = checksumPid(bookPID)
|
||||||
|
pidlst.append(bookPID)
|
||||||
|
|
||||||
|
# compute fixed pid for old pre 2.5 firmware update pid as well
|
||||||
|
bookPID = pidFromSerial(serialnum, 7) + "*"
|
||||||
|
bookPID = checksumPid(bookPID)
|
||||||
|
pidlst.append(bookPID)
|
||||||
|
|
||||||
|
return pidlst
|
||||||
|
|
||||||
|
|
||||||
|
# parse the Kindleinfo file to calculate the book pid.
|
||||||
|
|
||||||
|
keynames = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber"]
|
||||||
|
|
||||||
|
def getK4Pids(pidlst, rec209, token, kInfoFile):
|
||||||
|
global charMap1
|
||||||
|
kindleDatabase = None
|
||||||
|
try:
|
||||||
|
kindleDatabase = getDBfromFile(kInfoFile)
|
||||||
|
except Exception, message:
|
||||||
|
print(message)
|
||||||
|
kindleDatabase = None
|
||||||
|
pass
|
||||||
|
|
||||||
|
if kindleDatabase == None :
|
||||||
|
return pidlst
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Get the Mazama Random number
|
||||||
|
MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"]
|
||||||
|
|
||||||
|
# Get the kindle account token
|
||||||
|
kindleAccountToken = kindleDatabase["kindle.account.tokens"]
|
||||||
|
except KeyError:
|
||||||
|
print "Keys not found in " + kInfoFile
|
||||||
|
return pidlst
|
||||||
|
|
||||||
|
# Get the ID string used
|
||||||
|
encodedIDString = encodeHash(GetIDString(),charMap1)
|
||||||
|
|
||||||
|
# Get the current user name
|
||||||
|
encodedUsername = encodeHash(GetUserName(),charMap1)
|
||||||
|
|
||||||
|
# concat, hash and encode to calculate the DSN
|
||||||
|
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
|
||||||
|
|
||||||
|
# Compute the device PID (for which I can tell, is used for nothing).
|
||||||
|
table = generatePidEncryptionTable()
|
||||||
|
devicePID = generateDevicePID(table,DSN,4)
|
||||||
|
devicePID = checksumPid(devicePID)
|
||||||
|
pidlst.append(devicePID)
|
||||||
|
|
||||||
|
# Compute book PIDs
|
||||||
|
|
||||||
|
# book pid
|
||||||
|
pidHash = SHA1(DSN+kindleAccountToken+rec209+token)
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
bookPID = checksumPid(bookPID)
|
||||||
|
pidlst.append(bookPID)
|
||||||
|
|
||||||
|
# variant 1
|
||||||
|
pidHash = SHA1(kindleAccountToken+rec209+token)
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
bookPID = checksumPid(bookPID)
|
||||||
|
pidlst.append(bookPID)
|
||||||
|
|
||||||
|
# variant 2
|
||||||
|
pidHash = SHA1(DSN+rec209+token)
|
||||||
|
bookPID = encodePID(pidHash)
|
||||||
|
bookPID = checksumPid(bookPID)
|
||||||
|
pidlst.append(bookPID)
|
||||||
|
|
||||||
|
return pidlst
|
||||||
|
|
||||||
|
def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
|
||||||
|
pidlst = []
|
||||||
|
if kInfoFiles is None:
|
||||||
|
kInfoFiles = []
|
||||||
|
if k4:
|
||||||
|
kInfoFiles = getKindleInfoFiles(kInfoFiles)
|
||||||
|
for infoFile in kInfoFiles:
|
||||||
|
pidlst = getK4Pids(pidlst, md1, md2, infoFile)
|
||||||
|
for serialnum in serials:
|
||||||
|
pidlst = getKindlePid(pidlst, md1, md2, serialnum)
|
||||||
|
for pid in pids:
|
||||||
|
pidlst.append(pid)
|
||||||
|
return pidlst
|
||||||
@@ -44,8 +44,20 @@
|
|||||||
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
|
||||||
# 0.23 - fixed problem with older files with no EXTH section
|
# 0.23 - fixed problem with older files with no EXTH section
|
||||||
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
|
||||||
|
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
|
||||||
|
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
|
||||||
|
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
|
||||||
|
# 0.28 - slight additional changes to metadata token generation (None -> '')
|
||||||
|
# 0.29 - It seems that the ideas about when multibyte trailing characters were
|
||||||
|
# included in the encryption were wrong. They are for DOC compressed
|
||||||
|
# files, but they are not for HUFF/CDIC compress files!
|
||||||
|
# 0.30 - Modified interface slightly to work better with new calibre plugin style
|
||||||
|
# 0.31 - The multibyte encrytion info is true for version 7 files too.
|
||||||
|
# 0.32 - Added support for "Print Replica" Kindle ebooks
|
||||||
|
# 0.33 - Performance improvements for large files (concatenation)
|
||||||
|
# 0.34 - Performance improvements in decryption (libalfcrypto)
|
||||||
|
|
||||||
__version__ = '0.24'
|
__version__ = '0.34'
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
@@ -62,6 +74,7 @@ sys.stdout=Unbuffered(sys.stdout)
|
|||||||
import os
|
import os
|
||||||
import struct
|
import struct
|
||||||
import binascii
|
import binascii
|
||||||
|
from alfcrypto import Pukall_Cipher
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -73,36 +86,37 @@ class DrmException(Exception):
|
|||||||
|
|
||||||
# Implementation of Pukall Cipher 1
|
# Implementation of Pukall Cipher 1
|
||||||
def PC1(key, src, decryption=True):
|
def PC1(key, src, decryption=True):
|
||||||
sum1 = 0;
|
return Pukall_Cipher().PC1(key,src,decryption)
|
||||||
sum2 = 0;
|
# sum1 = 0;
|
||||||
keyXorVal = 0;
|
# sum2 = 0;
|
||||||
if len(key)!=16:
|
# keyXorVal = 0;
|
||||||
print "Bad key length!"
|
# if len(key)!=16:
|
||||||
return None
|
# print "Bad key length!"
|
||||||
wkey = []
|
# return None
|
||||||
for i in xrange(8):
|
# wkey = []
|
||||||
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
# for i in xrange(8):
|
||||||
dst = ""
|
# wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
|
||||||
for i in xrange(len(src)):
|
# dst = ""
|
||||||
temp1 = 0;
|
# for i in xrange(len(src)):
|
||||||
byteXorVal = 0;
|
# temp1 = 0;
|
||||||
for j in xrange(8):
|
# byteXorVal = 0;
|
||||||
temp1 ^= wkey[j]
|
# for j in xrange(8):
|
||||||
sum2 = (sum2+j)*20021 + sum1
|
# temp1 ^= wkey[j]
|
||||||
sum1 = (temp1*346)&0xFFFF
|
# sum2 = (sum2+j)*20021 + sum1
|
||||||
sum2 = (sum2+sum1)&0xFFFF
|
# sum1 = (temp1*346)&0xFFFF
|
||||||
temp1 = (temp1*20021+1)&0xFFFF
|
# sum2 = (sum2+sum1)&0xFFFF
|
||||||
byteXorVal ^= temp1 ^ sum2
|
# temp1 = (temp1*20021+1)&0xFFFF
|
||||||
curByte = ord(src[i])
|
# byteXorVal ^= temp1 ^ sum2
|
||||||
if not decryption:
|
# curByte = ord(src[i])
|
||||||
keyXorVal = curByte * 257;
|
# if not decryption:
|
||||||
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
# keyXorVal = curByte * 257;
|
||||||
if decryption:
|
# curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
|
||||||
keyXorVal = curByte * 257;
|
# if decryption:
|
||||||
for j in xrange(8):
|
# keyXorVal = curByte * 257;
|
||||||
wkey[j] ^= keyXorVal;
|
# for j in xrange(8):
|
||||||
dst+=chr(curByte)
|
# wkey[j] ^= keyXorVal;
|
||||||
return dst
|
# dst+=chr(curByte)
|
||||||
|
# return dst
|
||||||
|
|
||||||
def checksumPid(s):
|
def checksumPid(s):
|
||||||
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
|
||||||
@@ -154,8 +168,12 @@ class MobiBook:
|
|||||||
return self.data_file[off:endoff]
|
return self.data_file[off:endoff]
|
||||||
|
|
||||||
def __init__(self, infile):
|
def __init__(self, infile):
|
||||||
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
|
|
||||||
# initial sanity check on file
|
# initial sanity check on file
|
||||||
self.data_file = file(infile, 'rb').read()
|
self.data_file = file(infile, 'rb').read()
|
||||||
|
self.mobi_data = ''
|
||||||
self.header = self.data_file[0:78]
|
self.header = self.data_file[0:78]
|
||||||
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
|
||||||
raise DrmException("invalid file format")
|
raise DrmException("invalid file format")
|
||||||
@@ -173,6 +191,7 @@ class MobiBook:
|
|||||||
# parse information from section 0
|
# parse information from section 0
|
||||||
self.sect = self.loadSection(0)
|
self.sect = self.loadSection(0)
|
||||||
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
|
||||||
|
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
|
||||||
|
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
print "Book has format: ", self.magic
|
print "Book has format: ", self.magic
|
||||||
@@ -182,14 +201,15 @@ class MobiBook:
|
|||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
return
|
return
|
||||||
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
|
||||||
|
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
|
||||||
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
|
||||||
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
|
||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
|
||||||
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
|
||||||
print "Extra Data Flags = %d" % self.extra_data_flags
|
print "Extra Data Flags = %d" % self.extra_data_flags
|
||||||
if self.mobi_version < 7:
|
if (self.compression != 17480):
|
||||||
# multibyte utf8 data is included in the encryption for mobi_version 6 and below
|
# multibyte utf8 data is included in the encryption for PalmDoc compression
|
||||||
# so clear that byte so that we leave it to be decrypted.
|
# so clear that byte so that we leave it to be decrypted.
|
||||||
self.extra_data_flags &= 0xFFFE
|
self.extra_data_flags &= 0xFFFE
|
||||||
|
|
||||||
@@ -207,12 +227,25 @@ class MobiBook:
|
|||||||
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
type, size = struct.unpack('>II', exth[pos: pos + 8])
|
||||||
content = exth[pos + 8: pos + size]
|
content = exth[pos + 8: pos + size]
|
||||||
self.meta_array[type] = content
|
self.meta_array[type] = content
|
||||||
|
# reset the text to speech flag and clipping limit, if present
|
||||||
|
if type == 401 and size == 9:
|
||||||
|
# set clipping limit to 100%
|
||||||
|
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
|
||||||
|
elif type == 404 and size == 9:
|
||||||
|
# make sure text to speech is enabled
|
||||||
|
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
|
||||||
|
# print type, size, content, content.encode('hex')
|
||||||
pos += size
|
pos += size
|
||||||
except:
|
except:
|
||||||
self.meta_array = {}
|
self.meta_array = {}
|
||||||
pass
|
pass
|
||||||
|
self.print_replica = False
|
||||||
|
|
||||||
def getBookTitle(self):
|
def getBookTitle(self):
|
||||||
|
codec_map = {
|
||||||
|
1252 : 'windows-1252',
|
||||||
|
65001 : 'utf-8',
|
||||||
|
}
|
||||||
title = ''
|
title = ''
|
||||||
if 503 in self.meta_array:
|
if 503 in self.meta_array:
|
||||||
title = self.meta_array[503]
|
title = self.meta_array[503]
|
||||||
@@ -223,21 +256,24 @@ class MobiBook:
|
|||||||
if title == '':
|
if title == '':
|
||||||
title = self.header[:32]
|
title = self.header[:32]
|
||||||
title = title.split("\0")[0]
|
title = title.split("\0")[0]
|
||||||
return title
|
codec = 'windows-1252'
|
||||||
|
if self.mobi_codepage in codec_map.keys():
|
||||||
|
codec = codec_map[self.mobi_codepage]
|
||||||
|
return unicode(title, codec).encode('utf-8')
|
||||||
|
|
||||||
def getPIDMetaInfo(self):
|
def getPIDMetaInfo(self):
|
||||||
rec209 = None
|
rec209 = ''
|
||||||
token = None
|
token = ''
|
||||||
if 209 in self.meta_array:
|
if 209 in self.meta_array:
|
||||||
rec209 = self.meta_array[209]
|
rec209 = self.meta_array[209]
|
||||||
data = rec209
|
data = rec209
|
||||||
# Parse the 209 data to find the the exth record with the token data.
|
# The 209 data comes in five byte groups. Interpret the last four bytes
|
||||||
# The last character of the 209 data points to the record with the token.
|
# of each group as a big endian unsigned integer to get a key value
|
||||||
# Always 208 from my experience, but I'll leave the logic in case that changes.
|
# if that key exists in the meta_array, append its contents to the token
|
||||||
for i in xrange(len(data)):
|
for i in xrange(0,len(data),5):
|
||||||
if ord(data[i]) != 0:
|
val, = struct.unpack('>I',data[i+1:i+5])
|
||||||
if self.meta_array[ord(data[i])] != None:
|
sval = self.meta_array.get(val,'')
|
||||||
token = self.meta_array[ord(data[i])]
|
token += sval
|
||||||
return rec209, token
|
return rec209, token
|
||||||
|
|
||||||
def patch(self, off, new):
|
def patch(self, off, new):
|
||||||
@@ -285,15 +321,29 @@ class MobiBook:
|
|||||||
break
|
break
|
||||||
return [found_key,pid]
|
return [found_key,pid]
|
||||||
|
|
||||||
|
def getMobiFile(self, outpath):
|
||||||
|
file(outpath,'wb').write(self.mobi_data)
|
||||||
|
|
||||||
|
def getPrintReplica(self):
|
||||||
|
return self.print_replica
|
||||||
|
|
||||||
def processBook(self, pidlist):
|
def processBook(self, pidlist):
|
||||||
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
|
||||||
print 'Crypto Type is: ', crypto_type
|
print 'Crypto Type is: ', crypto_type
|
||||||
self.crypto_type = crypto_type
|
self.crypto_type = crypto_type
|
||||||
if crypto_type == 0:
|
if crypto_type == 0:
|
||||||
print "This book is not encrypted."
|
print "This book is not encrypted."
|
||||||
return self.data_file
|
# we must still check for Print Replica
|
||||||
|
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
|
||||||
|
self.mobi_data = self.data_file
|
||||||
|
return
|
||||||
if crypto_type != 2 and crypto_type != 1:
|
if crypto_type != 2 and crypto_type != 1:
|
||||||
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
|
||||||
|
if 406 in self.meta_array:
|
||||||
|
data406 = self.meta_array[406]
|
||||||
|
val406, = struct.unpack('>Q',data406)
|
||||||
|
if val406 != 0:
|
||||||
|
raise DrmException("Cannot decode library or rented ebooks.")
|
||||||
|
|
||||||
goodpids = []
|
goodpids = []
|
||||||
for pid in pidlist:
|
for pid in pidlist:
|
||||||
@@ -308,8 +358,10 @@ class MobiBook:
|
|||||||
t1_keyvec = "QDCVEPMU675RUBSZ"
|
t1_keyvec = "QDCVEPMU675RUBSZ"
|
||||||
if self.magic == 'TEXtREAd':
|
if self.magic == 'TEXtREAd':
|
||||||
bookkey_data = self.sect[0x0E:0x0E+16]
|
bookkey_data = self.sect[0x0E:0x0E+16]
|
||||||
else:
|
elif self.mobi_version < 0:
|
||||||
bookkey_data = self.sect[0x90:0x90+16]
|
bookkey_data = self.sect[0x90:0x90+16]
|
||||||
|
else:
|
||||||
|
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
|
||||||
pid = "00000000"
|
pid = "00000000"
|
||||||
found_key = PC1(t1_keyvec, bookkey_data)
|
found_key = PC1(t1_keyvec, bookkey_data)
|
||||||
else :
|
else :
|
||||||
@@ -335,46 +387,56 @@ class MobiBook:
|
|||||||
|
|
||||||
# decrypt sections
|
# decrypt sections
|
||||||
print "Decrypting. Please wait . . .",
|
print "Decrypting. Please wait . . .",
|
||||||
new_data = self.data_file[:self.sections[1][0]]
|
mobidataList = []
|
||||||
|
mobidataList.append(self.data_file[:self.sections[1][0]])
|
||||||
for i in xrange(1, self.records+1):
|
for i in xrange(1, self.records+1):
|
||||||
data = self.loadSection(i)
|
data = self.loadSection(i)
|
||||||
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
|
||||||
if i%100 == 0:
|
if i%100 == 0:
|
||||||
print ".",
|
print ".",
|
||||||
# print "record %d, extra_size %d" %(i,extra_size)
|
# print "record %d, extra_size %d" %(i,extra_size)
|
||||||
new_data += PC1(found_key, data[0:len(data) - extra_size])
|
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
|
||||||
|
if i==1:
|
||||||
|
self.print_replica = (decoded_data[0:4] == '%MOP')
|
||||||
|
mobidataList.append(decoded_data)
|
||||||
if extra_size > 0:
|
if extra_size > 0:
|
||||||
new_data += data[-extra_size:]
|
mobidataList.append(data[-extra_size:])
|
||||||
if self.num_sections > self.records+1:
|
if self.num_sections > self.records+1:
|
||||||
new_data += self.data_file[self.sections[self.records+1][0]:]
|
mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
|
||||||
self.data_file = new_data
|
self.mobi_data = "".join(mobidataList)
|
||||||
print "done"
|
print "done"
|
||||||
return self.data_file
|
return
|
||||||
|
|
||||||
def getUnencryptedBook(infile,pid):
|
def getUnencryptedBook(infile,pid):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook([pid])
|
book.processBook([pid])
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
def getUnencryptedBookWithList(infile,pidlist):
|
def getUnencryptedBookWithList(infile,pidlist):
|
||||||
if not os.path.isfile(infile):
|
if not os.path.isfile(infile):
|
||||||
raise DrmException('Input File Not Found')
|
raise DrmException('Input File Not Found')
|
||||||
book = MobiBook(infile)
|
book = MobiBook(infile)
|
||||||
return book.processBook(pidlist)
|
book.processBook(pidlist)
|
||||||
|
return book.mobi_data
|
||||||
|
|
||||||
|
|
||||||
def main(argv=sys.argv):
|
def main(argv=sys.argv):
|
||||||
print ('MobiDeDrm v%(__version__)s. '
|
print ('MobiDeDrm v%(__version__)s. '
|
||||||
'Copyright 2008-2010 The Dark Reverser.' % globals())
|
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
|
||||||
if len(argv)<4:
|
if len(argv)<3 or len(argv)>4:
|
||||||
print "Removes protection from Mobipocket books"
|
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
|
||||||
print "Usage:"
|
print "Usage:"
|
||||||
print " %s <infile> <outfile> <Comma separated list of PIDs to try>" % sys.argv[0]
|
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
|
||||||
return 1
|
return 1
|
||||||
else:
|
else:
|
||||||
infile = argv[1]
|
infile = argv[1]
|
||||||
outfile = argv[2]
|
outfile = argv[2]
|
||||||
pidlist = argv[3].split(',')
|
if len(argv) is 4:
|
||||||
|
pidlist = argv[3].split(',')
|
||||||
|
else:
|
||||||
|
pidlist = {}
|
||||||
try:
|
try:
|
||||||
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
stripped_file = getUnencryptedBookWithList(infile, pidlist)
|
||||||
file(outfile, 'wb').write(stripped_file)
|
file(outfile, 'wb').write(stripped_file)
|
||||||
@@ -87,4 +87,3 @@ def load_libcrypto():
|
|||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
@@ -0,0 +1,68 @@
|
|||||||
|
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
|
||||||
|
# for details. Basically, it derives a key from a password and salt.
|
||||||
|
|
||||||
|
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
|
||||||
|
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
|
||||||
|
# This code may be freely used and modified for any purpose.
|
||||||
|
|
||||||
|
# Revision history
|
||||||
|
# v0.1 October 2004 - Initial release
|
||||||
|
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
|
||||||
|
# v0.3 "" the correct digest_size rather than always 20
|
||||||
|
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import hmac
|
||||||
|
from struct import pack
|
||||||
|
try:
|
||||||
|
# only in python 2.5
|
||||||
|
import hashlib
|
||||||
|
sha = hashlib.sha1
|
||||||
|
md5 = hashlib.md5
|
||||||
|
sha256 = hashlib.sha256
|
||||||
|
except ImportError: # pragma: NO COVERAGE
|
||||||
|
# fallback
|
||||||
|
import sha
|
||||||
|
import md5
|
||||||
|
|
||||||
|
# this is what you want to call.
|
||||||
|
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
|
||||||
|
try:
|
||||||
|
# depending whether the hashfn is from hashlib or sha/md5
|
||||||
|
digest_size = hashfn().digest_size
|
||||||
|
except TypeError: # pragma: NO COVERAGE
|
||||||
|
digest_size = hashfn.digest_size
|
||||||
|
# l - number of output blocks to produce
|
||||||
|
l = keylen / digest_size
|
||||||
|
if keylen % digest_size != 0:
|
||||||
|
l += 1
|
||||||
|
|
||||||
|
h = hmac.new( password, None, hashfn )
|
||||||
|
|
||||||
|
T = ""
|
||||||
|
for i in range(1, l+1):
|
||||||
|
T += pbkdf2_F( h, salt, itercount, i )
|
||||||
|
|
||||||
|
return T[0: keylen]
|
||||||
|
|
||||||
|
def xorstr( a, b ):
|
||||||
|
if len(a) != len(b):
|
||||||
|
raise ValueError("xorstr(): lengths differ")
|
||||||
|
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
|
||||||
|
|
||||||
|
def prf( h, data ):
|
||||||
|
hm = h.copy()
|
||||||
|
hm.update( data )
|
||||||
|
return hm.digest()
|
||||||
|
|
||||||
|
# Helper as per the spec. h is a hmac which has been created seeded with the
|
||||||
|
# password, it will be copy()ed and not modified.
|
||||||
|
def pbkdf2_F( h, salt, itercount, blocknum ):
|
||||||
|
U = prf( h, salt + pack('>i',blocknum ) )
|
||||||
|
T = U
|
||||||
|
|
||||||
|
for i in range(2, itercount+1):
|
||||||
|
U = prf( h, U )
|
||||||
|
T = xorstr( T, U )
|
||||||
|
|
||||||
|
return T
|
||||||
@@ -28,4 +28,3 @@ def load_pycrypto():
|
|||||||
i += 8
|
i += 8
|
||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
return DES
|
return DES
|
||||||
|
|
||||||
@@ -0,0 +1,220 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
|
||||||
|
import sys
|
||||||
|
|
||||||
|
ECB = 0
|
||||||
|
CBC = 1
|
||||||
|
class Des(object):
|
||||||
|
__pc1 = [56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17,
|
||||||
|
9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35,
|
||||||
|
62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21,
|
||||||
|
13, 5, 60, 52, 44, 36, 28, 20, 12, 4, 27, 19, 11, 3]
|
||||||
|
__left_rotations = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
|
||||||
|
__pc2 = [13, 16, 10, 23, 0, 4,2, 27, 14, 5, 20, 9,
|
||||||
|
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
|
||||||
|
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
|
||||||
|
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
|
||||||
|
__ip = [57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3,
|
||||||
|
61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7,
|
||||||
|
56, 48, 40, 32, 24, 16, 8, 0, 58, 50, 42, 34, 26, 18, 10, 2,
|
||||||
|
60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6]
|
||||||
|
__expansion_table = [31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8,
|
||||||
|
7, 8, 9, 10, 11, 12,11, 12, 13, 14, 15, 16,
|
||||||
|
15, 16, 17, 18, 19, 20,19, 20, 21, 22, 23, 24,
|
||||||
|
23, 24, 25, 26, 27, 28,27, 28, 29, 30, 31, 0]
|
||||||
|
__sbox = [[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
|
||||||
|
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
|
||||||
|
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
|
||||||
|
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
|
||||||
|
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
|
||||||
|
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
|
||||||
|
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
|
||||||
|
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
|
||||||
|
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
|
||||||
|
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
|
||||||
|
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
|
||||||
|
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
|
||||||
|
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
|
||||||
|
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
|
||||||
|
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
|
||||||
|
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
|
||||||
|
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
|
||||||
|
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
|
||||||
|
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
|
||||||
|
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
|
||||||
|
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
|
||||||
|
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
|
||||||
|
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
|
||||||
|
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
|
||||||
|
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
|
||||||
|
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
|
||||||
|
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
|
||||||
|
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
|
||||||
|
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
|
||||||
|
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
|
||||||
|
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
|
||||||
|
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],]
|
||||||
|
__p = [15, 6, 19, 20, 28, 11,27, 16, 0, 14, 22, 25,
|
||||||
|
4, 17, 30, 9, 1, 7,23,13, 31, 26, 2, 8,18, 12, 29, 5, 21, 10,3, 24]
|
||||||
|
__fp = [39, 7, 47, 15, 55, 23, 63, 31,38, 6, 46, 14, 54, 22, 62, 30,
|
||||||
|
37, 5, 45, 13, 53, 21, 61, 29,36, 4, 44, 12, 52, 20, 60, 28,
|
||||||
|
35, 3, 43, 11, 51, 19, 59, 27,34, 2, 42, 10, 50, 18, 58, 26,
|
||||||
|
33, 1, 41, 9, 49, 17, 57, 25,32, 0, 40, 8, 48, 16, 56, 24]
|
||||||
|
# Type of crypting being done
|
||||||
|
ENCRYPT = 0x00
|
||||||
|
DECRYPT = 0x01
|
||||||
|
def __init__(self, key, mode=ECB, IV=None):
|
||||||
|
if len(key) != 8:
|
||||||
|
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
|
||||||
|
self.block_size = 8
|
||||||
|
self.key_size = 8
|
||||||
|
self.__padding = ''
|
||||||
|
self.setMode(mode)
|
||||||
|
if IV:
|
||||||
|
self.setIV(IV)
|
||||||
|
self.L = []
|
||||||
|
self.R = []
|
||||||
|
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
|
||||||
|
self.final = []
|
||||||
|
self.setKey(key)
|
||||||
|
def getKey(self):
|
||||||
|
return self.__key
|
||||||
|
def setKey(self, key):
|
||||||
|
self.__key = key
|
||||||
|
self.__create_sub_keys()
|
||||||
|
def getMode(self):
|
||||||
|
return self.__mode
|
||||||
|
def setMode(self, mode):
|
||||||
|
self.__mode = mode
|
||||||
|
def getIV(self):
|
||||||
|
return self.__iv
|
||||||
|
def setIV(self, IV):
|
||||||
|
if not IV or len(IV) != self.block_size:
|
||||||
|
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
|
||||||
|
self.__iv = IV
|
||||||
|
def getPadding(self):
|
||||||
|
return self.__padding
|
||||||
|
def __String_to_BitList(self, data):
|
||||||
|
l = len(data) * 8
|
||||||
|
result = [0] * l
|
||||||
|
pos = 0
|
||||||
|
for c in data:
|
||||||
|
i = 7
|
||||||
|
ch = ord(c)
|
||||||
|
while i >= 0:
|
||||||
|
if ch & (1 << i) != 0:
|
||||||
|
result[pos] = 1
|
||||||
|
else:
|
||||||
|
result[pos] = 0
|
||||||
|
pos += 1
|
||||||
|
i -= 1
|
||||||
|
return result
|
||||||
|
def __BitList_to_String(self, data):
|
||||||
|
result = ''
|
||||||
|
pos = 0
|
||||||
|
c = 0
|
||||||
|
while pos < len(data):
|
||||||
|
c += data[pos] << (7 - (pos % 8))
|
||||||
|
if (pos % 8) == 7:
|
||||||
|
result += chr(c)
|
||||||
|
c = 0
|
||||||
|
pos += 1
|
||||||
|
return result
|
||||||
|
def __permutate(self, table, block):
|
||||||
|
return [block[x] for x in table]
|
||||||
|
def __create_sub_keys(self):
|
||||||
|
key = self.__permutate(Des.__pc1, self.__String_to_BitList(self.getKey()))
|
||||||
|
i = 0
|
||||||
|
self.L = key[:28]
|
||||||
|
self.R = key[28:]
|
||||||
|
while i < 16:
|
||||||
|
j = 0
|
||||||
|
while j < Des.__left_rotations[i]:
|
||||||
|
self.L.append(self.L[0])
|
||||||
|
del self.L[0]
|
||||||
|
self.R.append(self.R[0])
|
||||||
|
del self.R[0]
|
||||||
|
j += 1
|
||||||
|
self.Kn[i] = self.__permutate(Des.__pc2, self.L + self.R)
|
||||||
|
i += 1
|
||||||
|
def __des_crypt(self, block, crypt_type):
|
||||||
|
block = self.__permutate(Des.__ip, block)
|
||||||
|
self.L = block[:32]
|
||||||
|
self.R = block[32:]
|
||||||
|
if crypt_type == Des.ENCRYPT:
|
||||||
|
iteration = 0
|
||||||
|
iteration_adjustment = 1
|
||||||
|
else:
|
||||||
|
iteration = 15
|
||||||
|
iteration_adjustment = -1
|
||||||
|
i = 0
|
||||||
|
while i < 16:
|
||||||
|
tempR = self.R[:]
|
||||||
|
self.R = self.__permutate(Des.__expansion_table, self.R)
|
||||||
|
self.R = [x ^ y for x,y in zip(self.R, self.Kn[iteration])]
|
||||||
|
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
|
||||||
|
j = 0
|
||||||
|
Bn = [0] * 32
|
||||||
|
pos = 0
|
||||||
|
while j < 8:
|
||||||
|
m = (B[j][0] << 1) + B[j][5]
|
||||||
|
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
|
||||||
|
v = Des.__sbox[j][(m << 4) + n]
|
||||||
|
Bn[pos] = (v & 8) >> 3
|
||||||
|
Bn[pos + 1] = (v & 4) >> 2
|
||||||
|
Bn[pos + 2] = (v & 2) >> 1
|
||||||
|
Bn[pos + 3] = v & 1
|
||||||
|
pos += 4
|
||||||
|
j += 1
|
||||||
|
self.R = self.__permutate(Des.__p, Bn)
|
||||||
|
self.R = [x ^ y for x, y in zip(self.R, self.L)]
|
||||||
|
self.L = tempR
|
||||||
|
i += 1
|
||||||
|
iteration += iteration_adjustment
|
||||||
|
self.final = self.__permutate(Des.__fp, self.R + self.L)
|
||||||
|
return self.final
|
||||||
|
def crypt(self, data, crypt_type):
|
||||||
|
if not data:
|
||||||
|
return ''
|
||||||
|
if len(data) % self.block_size != 0:
|
||||||
|
if crypt_type == Des.DECRYPT: # Decryption must work on 8 byte blocks
|
||||||
|
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
|
||||||
|
if not self.getPadding():
|
||||||
|
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
|
||||||
|
else:
|
||||||
|
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
|
||||||
|
if self.getMode() == CBC:
|
||||||
|
if self.getIV():
|
||||||
|
iv = self.__String_to_BitList(self.getIV())
|
||||||
|
else:
|
||||||
|
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
|
||||||
|
i = 0
|
||||||
|
dict = {}
|
||||||
|
result = []
|
||||||
|
while i < len(data):
|
||||||
|
block = self.__String_to_BitList(data[i:i+8])
|
||||||
|
if self.getMode() == CBC:
|
||||||
|
if crypt_type == Des.ENCRYPT:
|
||||||
|
block = [x ^ y for x, y in zip(block, iv)]
|
||||||
|
processed_block = self.__des_crypt(block, crypt_type)
|
||||||
|
if crypt_type == Des.DECRYPT:
|
||||||
|
processed_block = [x ^ y for x, y in zip(processed_block, iv)]
|
||||||
|
iv = block
|
||||||
|
else:
|
||||||
|
iv = processed_block
|
||||||
|
else:
|
||||||
|
processed_block = self.__des_crypt(block, crypt_type)
|
||||||
|
result.append(self.__BitList_to_String(processed_block))
|
||||||
|
i += 8
|
||||||
|
if crypt_type == Des.DECRYPT and self.getPadding():
|
||||||
|
s = result[-1]
|
||||||
|
while s[-1] == self.getPadding():
|
||||||
|
s = s[:-1]
|
||||||
|
result[-1] = s
|
||||||
|
return ''.join(result)
|
||||||
|
def encrypt(self, data, pad=''):
|
||||||
|
self.__padding = pad
|
||||||
|
return self.crypt(data, Des.ENCRYPT)
|
||||||
|
def decrypt(self, data, pad=''):
|
||||||
|
self.__padding = pad
|
||||||
|
return self.crypt(data, Des.DECRYPT)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user