Merge pull request #394 from anthrotype/psLib-py23

make t1Lib and psLib compatible with python 3
This commit is contained in:
Cosimo Lupo 2015-10-24 08:48:48 +01:00
commit 687a0e7ee3
6 changed files with 226 additions and 64 deletions

View File

@ -19,19 +19,35 @@ def _encryptChar(plain, R):
def decrypt(cipherstring, R):
    r"""Decrypt eexec-encrypted 'cipherstring' starting with key 'R'.

    Returns a (plainstring, R) tuple, where R is the updated key so
    decryption can be resumed on a following chunk.

    >>> testStr = b"\0\0asdadads asds\265"
    >>> decryptedStr, R = decrypt(testStr, 12321)
    >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
    True
    >>> R == 36142
    True
    """
    plainList = []
    for cipher in cipherstring:
        plain, R = _decryptChar(cipher, R)
        plainList.append(plain)
    # bytesjoin keeps the result a byte string on both Python 2 and 3.
    # The stale, immediately-overwritten strjoin() assignment left over
    # from the diff was dead code (and py3-incompatible), so it is gone.
    plainstring = bytesjoin(plainList)
    return plainstring, int(R)
def encrypt(plainstring, R):
    r"""Encrypt 'plainstring' with the eexec algorithm, starting key 'R'.

    Returns a (cipherstring, R) tuple, where R is the updated key.

    >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
    >>> encryptedStr, R = encrypt(testStr, 12321)
    >>> encryptedStr == b"\0\0asdadads asds\265"
    True
    >>> R == 36142
    True
    """
    cipherList = []
    for plain in plainstring:
        cipher, R = _encryptChar(plain, R)
        cipherList.append(cipher)
    # bytesjoin keeps the result a byte string on both Python 2 and 3.
    # The stale, immediately-overwritten strjoin() assignment left over
    # from the diff was dead code (and py3-incompatible), so it is gone.
    cipherstring = bytesjoin(cipherList)
    return cipherstring, int(R)
@ -41,15 +57,11 @@ def hexString(s):
def deHexString(h):
    """Strip all whitespace from hex data 'h' and decode it to binary."""
    import binascii
    # Dropped the dead strjoin() variant that the diff left immediately
    # above this line; bytesjoin keeps the data binary-safe on Python 3.
    h = bytesjoin(h.split())
    return binascii.unhexlify(h)
if __name__ == "__main__":
    # Run this module's doctests; the exit status is the number of
    # failures (0 == success). The old _test() helper duplicated these
    # checks using a py2-only str literal (it would feed single-character
    # strings to the byte-oriented decrypt on Python 3), so the doctest
    # runner replaces it.
    import sys
    import doctest
    sys.exit(doctest.testmod().failed)

View File

@ -7,15 +7,15 @@ import collections
from string import whitespace
# PostScript self-delimiting characters; / is one too, but we take care
# of that one differently. All patterns below are byte strings, since
# the tokenizer operates on binary Type 1 data (py2/py3 compatible);
# the old str-based duplicates from the diff are removed.
ps_special = b'()<>[]{}%'

skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = r"""
stringPat = br"""
\(
(
(
@ -29,22 +29,44 @@ stringPat = r"""
[^()]*
\)
"""
stringPat = "".join(stringPat.split())
stringPat = b"".join(stringPat.split())
stringRE = re.compile(stringPat)
hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception):
    """Raised when the tokenizer encounters malformed PostScript data."""


class PSError(Exception):
    """Raised for errors while interpreting a PostScript program."""
# StringIO.StringIO is only available in Python 2. The PSTokenizer class
# attemps to access private attributes from the latter which are not available
# in other file(-like) objects. Therefore, io.BytesIO (or io.StringIO) can't
# be used as ready replacements. For now we must drop Python3 support in psLib,
# and consequently in t1Lib, until we rewrite them for py23.
# See: https://github.com/behdad/fonttools/issues/391
class PSTokenizer(StringIO):
class PSTokenizer(object):
def __init__(self, buf=b''):
    # Normalize the input to a byte string so tokenizing behaves the
    # same on Python 2 and 3 (tobytes comes from fontTools.misc.py23).
    buf = tobytes(buf)
    self.buf = buf          # raw PostScript data being tokenized
    self.len = len(buf)     # cached total length for bounds checks
    self.pos = 0            # current read position within buf
    self.closed = False     # mimics the file-object protocol
def read(self, n=-1):
    """Return up to 'n' bytes from the current position; fewer if EOF
    intervenes. A negative or omitted 'n' returns everything up to EOF.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    start = self.pos
    if n is None or n < 0:
        end = self.len
    else:
        end = min(start + n, self.len)
    data = self.buf[start:end]
    self.pos = end
    return data
def close(self):
    """Mark the tokenizer closed and release the buffer state."""
    if self.closed:
        return
    self.closed = True
    del self.buf, self.pos
def getnexttoken(self,
# localize some stuff, for performance
@ -53,32 +75,30 @@ class PSTokenizer(StringIO):
stringmatch=stringRE.match,
hexstringmatch=hexstringRE.match,
commentmatch=commentRE.match,
endmatch=endofthingRE.match,
whitematch=skipwhiteRE.match):
endmatch=endofthingRE.match):
_, nextpos = whitematch(self.buf, self.pos).span()
self.pos = nextpos
self.skipwhite()
if self.pos >= self.len:
return None, None
pos = self.pos
buf = self.buf
char = buf[pos]
char = bytechr(byteord(buf[pos]))
if char in ps_special:
if char in '{}[]':
if char in b'{}[]':
tokentype = 'do_special'
token = char
elif char == '%':
elif char == b'%':
tokentype = 'do_comment'
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == '(':
elif char == b'(':
tokentype = 'do_string'
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError('bad string at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == '<':
elif char == b'<':
tokentype = 'do_hexstring'
m = hexstringmatch(buf, pos)
if m is None:
@ -88,7 +108,7 @@ class PSTokenizer(StringIO):
else:
raise PSTokenError('bad token at character %d' % pos)
else:
if char == '/':
if char == b'/':
tokentype = 'do_literal'
m = endmatch(buf, pos+1)
else:
@ -99,6 +119,7 @@ class PSTokenizer(StringIO):
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
token = tostr(token, encoding='ascii')
return tokentype, token
def skipwhite(self, whitematch=skipwhiteRE.match):
@ -107,7 +128,6 @@ class PSTokenizer(StringIO):
def starteexec(self):
self.pos = self.pos + 1
#self.skipwhite()
self.dirtybuf = self.buf[self.pos:]
self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
self.len = len(self.buf)
@ -119,11 +139,6 @@ class PSTokenizer(StringIO):
self.buf = self.dirtybuf
del self.dirtybuf
def flush(self):
    # NOTE(review): this relies on self.buflist, a private attribute of
    # the Python 2 StringIO.StringIO base class; nothing in this class
    # defines it. It only makes sense for the old StringIO-based
    # tokenizer — confirm this is dead code after the py3 rewrite.
    if self.buflist:
        self.buf = self.buf + "".join(self.buflist)
        self.buflist = []
class PSInterpreter(PSOperators):
@ -163,7 +178,6 @@ class PSInterpreter(PSOperators):
try:
while 1:
tokentype, token = getnexttoken()
#print token
if not token:
break
if tokentype:
@ -345,12 +359,3 @@ def suckfont(data):
rawfont = fontdir[fontNames[0]]
interpreter.close()
return unpack_item(rawfont)
if __name__ == "__main__":
    # Interactive smoke test: prompt for a Type 1 font file and parse it.
    # EasyDialogs is a Mac-only, Python 2-era module, so this entry
    # point only works in that legacy environment.
    import EasyDialogs
    path = EasyDialogs.AskFileForOpen()
    if path:
        from fontTools import t1Lib
        data, kind = t1Lib.read(path)
        font = suckfont(data)

View File

@ -13,7 +13,7 @@ write(path, data, kind='OTHER', dohex=False)
'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
'dohex' is a flag which determines whether the eexec encrypted
part should be written as hexadecimal or binary, but only if kind
is 'LWFN' or 'PFB'.
is 'OTHER'.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
@ -36,7 +36,6 @@ except ImportError:
haveMacSupport = 0
else:
haveMacSupport = 1
import MacOS
class T1Error(Exception): pass
@ -56,8 +55,8 @@ class T1Font(object):
else:
pass # XXX
def saveAs(self, path, type, dohex=False):
    """Write the font to 'path'. 'type' is 'LWFN', 'PFB' or 'OTHER';
    'dohex' requests hexadecimal output of the eexec-encrypted part
    (honored for 'OTHER' only). Backward compatible with the old
    two-argument form, since 'dohex' defaults to False.
    """
    # The stale pre-diff overload without 'dohex' is dropped; keeping
    # both definitions would silently shadow one with the other.
    write(path, self.getData(), type, dohex)
def getData(self):
# XXX Todo: if the data has been converted to Python object,
@ -266,7 +265,7 @@ def writeOther(path, data, dohex=False):
if code == 2 and dohex:
while chunk:
f.write(eexec.hexString(chunk[:hexlinelen]))
f.write('\r')
f.write(b'\r')
chunk = chunk[hexlinelen:]
else:
f.write(chunk)
@ -276,13 +275,13 @@ def writeOther(path, data, dohex=False):
# decryption tools
EEXECBEGIN = "currentfile eexec"
EEXECEND = '0' * 64
EEXECINTERNALEND = "currentfile closefile"
EEXECBEGINMARKER = "%-- eexec start\r"
EEXECENDMARKER = "%-- eexec end\r"
EEXECBEGIN = b"currentfile eexec"
EEXECEND = b'0' * 64
EEXECINTERNALEND = b"currentfile closefile"
EEXECBEGINMARKER = b"%-- eexec start\r"
EEXECENDMARKER = b"%-- eexec end\r"
_ishexRE = re.compile('[0-9A-Fa-f]*$')
_ishexRE = re.compile(b'[0-9A-Fa-f]*$')
def isHex(text):
    """Return True if 'text' consists entirely of hexadecimal digits."""
    return bool(_ishexRE.match(text))
@ -300,7 +299,7 @@ def decryptType1(data):
if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
raise T1Error("invalid end of eexec part")
decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r'
decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
else:
if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
@ -333,7 +332,7 @@ def findEncryptedChunks(data):
return chunks
def deHexString(hexstring):
    """Strip whitespace from 'hexstring' and decode it to binary data."""
    # The diff residue left two consecutive return statements here; the
    # first (strjoin, py2-only) made the bytes-safe version unreachable.
    # Only the bytesjoin form is kept.
    return eexec.deHexString(bytesjoin(hexstring.split()))
# Type 1 assertion
@ -357,7 +356,7 @@ def assertType1(data):
# pfb helpers
def longToString(long):
    """Serialize integer 'long' as 4 little-endian bytes (PFB header).

    Note: the parameter name shadows the py2 builtin 'long'; kept as-is
    for interface compatibility with existing callers.
    """
    # Dropped the dead 's = ""' str initializer the diff left here; it
    # was immediately overwritten by the bytes version below.
    s = b""
    for i in range(4):
        s += bytechr((long & (0xff << (i * 8))) >> i * 8)
    return s

View File

@ -0,0 +1,86 @@
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import unittest
import os
from fontTools import t1Lib
from fontTools.pens.basePen import NullPen
import random
# Locate the test data relative to this file so the tests pass no matter
# which working directory pytest/unittest is launched from.
CWD = os.path.abspath(os.path.dirname(__file__))
DATADIR = os.path.join(CWD, 'testdata')
PFA = os.path.join(DATADIR, 'TestT1-Regular.pfa')  # ASCII/hex flavor
PFB = os.path.join(DATADIR, 'TestT1-Regular.pfb')  # binary flavor
class FindEncryptedChunksTest(unittest.TestCase):
    """findEncryptedChunks() must split a PFA into clear/eexec/clear."""

    def test_findEncryptedChunks(self):
        with open(PFA, "rb") as infile:
            contents = infile.read()
        chunks = t1Lib.findEncryptedChunks(contents)
        self.assertEqual(len(chunks), 3)
        # only the middle chunk carries eexec-encrypted data
        self.assertEqual([bool(chunk[0]) for chunk in chunks],
                         [False, True, False])
class DecryptType1Test(unittest.TestCase):
    """decryptType1() must actually transform the encrypted portion."""

    def test_decryptType1(self):
        with open(PFA, "rb") as infile:
            raw = infile.read()
        self.assertNotEqual(t1Lib.decryptType1(raw), raw)
class ReadWriteTest(unittest.TestCase):
    """Round-trip fonts through write()/re-read and compare the data."""

    def test_read_pfa_write_pfb(self):
        original = t1Lib.T1Font(PFA)
        roundtripped = self.write(original, 'PFB')
        self.assertEqual(original.getData(), roundtripped)

    def test_read_pfb_write_pfa(self):
        original = t1Lib.T1Font(PFB)
        # 'OTHER' with dohex=True produces PFA-style hex output
        roundtripped = self.write(original, 'OTHER', dohex=True)
        self.assertEqual(original.getData(), roundtripped)

    @staticmethod
    def write(font, outtype, dohex=False):
        # Save to a scratch file, read it back, and always clean up,
        # even when saving or re-reading raises.
        temp = os.path.join(DATADIR, 'temp.' + outtype.lower())
        try:
            font.saveAs(temp, outtype, dohex=dohex)
            data = t1Lib.T1Font(temp).getData()
        finally:
            if os.path.exists(temp):
                os.remove(temp)
        return data
class T1FontTest(unittest.TestCase):
    """Parsing and glyph access for both PFA and PFB flavors."""

    def test_parse_pfa(self):
        self._assertParses(t1Lib.T1Font(PFA))

    def test_parse_pfb(self):
        self._assertParses(t1Lib.T1Font(PFB))

    def _assertParses(self, font):
        # both flavors must decode to the same font dictionary
        font.parse()
        self.assertEqual(font['FontName'], 'TestT1-Regular')
        self.assertTrue('Subrs' in font['Private'])

    def test_getGlyphSet(self):
        glyphset = t1Lib.T1Font(PFA).getGlyphSet()
        glyph = list(glyphset.values())[random.randrange(len(glyphset))]
        self.assertTrue(hasattr(glyph, 'draw'))
        # 'width' is set lazily, only once the glyph has been drawn
        self.assertFalse(hasattr(glyph, 'width'))
        glyph.draw(NullPen())
        self.assertTrue(hasattr(glyph, 'width'))

View File

@ -0,0 +1,60 @@
%!FontType1-1.1: TestT1-Regular 1.0
%%BeginResource: font TestT1-Regular
12 dict dup begin
/FontType 1 def
/FontName /TestT1-Regular def
/FontInfo 14 dict dup begin
/version (1.0) def
/Notice (Test T1 is not a trademark of FontTools.) def
/Copyright (Copyright c 2015 by FontTools. No rights reserved.) def
/FullName (Test T1) def
/FamilyName (Test T1) def
/Weight (Regular) def
/ItalicAngle 0.000000 def
/isFixedPitch false def
/UnderlinePosition -75.000000 def
/UnderlineThickness 50.000000 def
/FSType 0 def
end def
/PaintType 0 def
/FontMatrix [0.001 0 0 0.001 0 0] def
/Encoding 256 array
0 1 255 {1 index exch /.notdef put} for
def
/FontBBox {50.000000 0.000000 668.000000 750.000000} def
end
currentfile eexec bab431ea06bb0a1031e1aa11919e714ac1ac5197cb08b39a4d7e746fca0af12d89ac0ebd1bc11ab1
b3887b922efcec739534242d2fd22e7c30e3edce24b93798627e1ac3387816a8c4b84d76047dada8
28b2ad27c5603046fecbc2a97adc5c37a68912324d2d435f2ee0ccc38df10ba1271a1c9af8897a6d
6e425cd7d18fd6bd64c2adadb74365bc101a850841669886291e158cbfa7f204b3fe0ba49ffe0c80
4f6795d32eb770c5fcd38a3879c06a4bb87b2d3ab100d8c2b5f89e9be99248575575025c66381446
e4d9183674880aef57fb2032a1e00431133b16f6d758de7c3d0c48a0fada1d40034742a69fb3a6f9
450d2251e659158a04697cbfa70907346d27d37ef683284385c44a1b5089bd29b4629b6483122dc8
cbce7327bdc33dd30e6fcdb346c0ddaf433a5ac740423aa35639b2386673832f5ae8cc380e9703ba
d3369533bfa85af9f56a090c9d97f5fc26ed102c07b647137e83632be51a65a532bd26430b59a31c
3cb037ded351c1d4e944733feb30a3e6f81c1a7b74ac4e0eadbe705412d47991c246e8820876bbc6
1f6a3e264ae6b2ad4b864b0d7abee289308bea26eb15d00d2b9103861386e0a5f1802ba06f916810
62110d2b1c3641806f78eea365614f440b580185e84bac6f87bee36108d95174c786600cf0e9dc4a
5545d1a84cfe8392115c0b7027c17fd460481d21f684af32204085690946327bfded992852645149
8d44150d2495bd2efe0db6a450c6e28d0a52ca234e252129d5095596b0d8de096682d2eb00bc8320
f257fd653b05a22eab7a193ccc315a6ee274a03ff1fdf443b310157a02656ca4b06c581dca8ced72
c6ddcab26eb856ad1093452c587438b7f8408c1311e19254955914612c09828fd4d4fc2b8b0406ea
2ee38348a8bdab88a77b8033366b2e469834c01b7bd73207b7c67756937c7a9232947fde2e0ea327
7b7d610e601b91389ccbcdd813c87db5333c0c723e48d3ef69285f246327978ce68ae9081076a227
1a962a2a10e2b1147ec40b0f6553a00c8b329118569d16fe04a4fa195caf1b04c52c9a562b72e0cd
e411d747af796b9d2fb086ed927efb0e5fc9f50aa18aaf4949cba0de0805210620a19eec4319dfef
a74d9d13d16f8ad793323a231347e6b40022a1100c1e064b8679c1da63a26dfb217a6037096ad796
320da5a9d0526eed51d7d64d3223e285c1a8c70780c59ecc9dd9bc90a0f84ffa038834918cebe247
f6e8fa4ca0654019196388f2df008e63bc32c8e5e686dbb69193b7749638c22b389fb1f090fbb007
fdb8a6ee4e4b29e123fe1652fe72239bd2c8
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
cleartomark
%%EndResource
%%EOF

Binary file not shown.