1999-12-16 21:34:53 +00:00
|
|
|
"""cffLib.py -- read/write tools for Adobe CFF fonts."""
|
|
|
|
|
2014-01-14 15:07:50 +08:00
|
|
|
from __future__ import print_function, division, absolute_import
|
2013-11-27 17:27:45 -05:00
|
|
|
from fontTools.misc.py23 import *
|
2013-09-17 16:59:39 -04:00
|
|
|
from fontTools.misc import sstruct
|
2000-01-16 22:14:02 +00:00
|
|
|
from fontTools.misc import psCharStrings
|
2002-05-24 09:58:04 +00:00
|
|
|
from fontTools.misc.textTools import safeEval
|
2013-11-27 17:27:45 -05:00
|
|
|
import struct
|
2016-01-24 14:50:57 +00:00
|
|
|
import logging
|
2013-11-27 05:52:33 -05:00
|
|
|
|
2016-01-24 14:50:57 +00:00
|
|
|
|
|
|
|
# mute cffLib debug messages when running ttx in verbose mode
|
|
|
|
DEBUG = logging.DEBUG - 1
|
|
|
|
log = logging.getLogger(__name__)
|
2002-05-17 07:06:32 +00:00
|
|
|
|
|
|
|
|
1999-12-16 21:34:53 +00:00
|
|
|
cffHeaderFormat = """
|
|
|
|
major: B
|
|
|
|
minor: B
|
|
|
|
hdrSize: B
|
|
|
|
offSize: B
|
|
|
|
"""
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CFFFontSet(object):

	"""Top-level object representing a complete CFF font set: the header,
	the font name INDEX, the TopDict INDEX, the string INDEX and the
	Global Subrs INDEX.  Behaves as a read-only mapping from font name
	to TopDict."""

	def __init__(self):
		pass

	def decompile(self, file, otFont):
		"""Parse the binary CFF data from *file*.  *otFont* is unused here
		(kept for interface symmetry with compile())."""
		# The fixed 4-byte header: major, minor, hdrSize, offSize.
		sstruct.unpack(cffHeaderFormat, file.read(4), self)
		assert self.major == 1 and self.minor == 0, \
			"unknown CFF format: %d.%d" % (self.major, self.minor)

		# hdrSize may be larger than 4; skip any extra header bytes.
		file.seek(self.hdrSize)
		# The four top-level INDEX structures, in spec order.
		self.fontNames = list(Index(file))
		self.topDictIndex = TopDictIndex(file)
		self.strings = IndexedStrings(file)
		self.GlobalSubrs = GlobalSubrsIndex(file)
		# Give the top dicts access to the shared string and subr tables.
		self.topDictIndex.strings = self.strings
		self.topDictIndex.GlobalSubrs = self.GlobalSubrs

	def __len__(self):
		return len(self.fontNames)

	def keys(self):
		return list(self.fontNames)

	def values(self):
		return self.topDictIndex

	def __getitem__(self, name):
		"""Look up a TopDict by font name; raises KeyError if absent."""
		try:
			index = self.fontNames.index(name)
		except ValueError:
			raise KeyError(name)
		return self.topDictIndex[index]

	def compile(self, file, otFont):
		"""Write the font set out to *file* in binary CFF format.
		*otFont* supplies the glyph order for top dicts lacking a charset."""
		strings = IndexedStrings()
		writer = CFFWriter()
		writer.add(sstruct.pack(cffHeaderFormat, self))
		fontNames = Index()
		for name in self.fontNames:
			fontNames.append(name)
		writer.add(fontNames.getCompiler(strings, None))
		topCompiler = self.topDictIndex.getCompiler(strings, None)
		writer.add(topCompiler)
		writer.add(strings.getCompiler())
		writer.add(self.GlobalSubrs.getCompiler(strings, None))

		# Fill in missing charsets from the font's glyph order before
		# the children (charset compilers etc.) are generated.
		for topDict in self.topDictIndex:
			if not hasattr(topDict, "charset") or topDict.charset is None:
				charset = otFont.getGlyphOrder()
				topDict.charset = charset

		for child in topCompiler.getChildren(strings):
			writer.add(child)

		writer.toFile(file)

	def toXML(self, xmlWriter, progress=None):
		"""Dump each font as a <CFFFont> element, then the shared
		<GlobalSubrs> element."""
		for fontName in self.fontNames:
			xmlWriter.begintag("CFFFont", name=tostr(fontName))
			xmlWriter.newline()
			font = self[fontName]
			font.toXML(xmlWriter, progress)
			xmlWriter.endtag("CFFFont")
			xmlWriter.newline()
			xmlWriter.newline()
		xmlWriter.begintag("GlobalSubrs")
		xmlWriter.newline()
		self.GlobalSubrs.toXML(xmlWriter, progress)
		xmlWriter.endtag("GlobalSubrs")
		xmlWriter.newline()

	def fromXML(self, name, attrs, content):
		"""Rebuild the font set from TTX XML, one top-level element
		(<CFFFont> or <GlobalSubrs>) per call."""
		if not hasattr(self, "GlobalSubrs"):
			# First call: set up empty containers and default header fields.
			self.GlobalSubrs = GlobalSubrsIndex()
			self.major = 1
			self.minor = 0
			self.hdrSize = 4
			self.offSize = 4  # XXX ??
		if name == "CFFFont":
			if not hasattr(self, "fontNames"):
				self.fontNames = []
				self.topDictIndex = TopDictIndex()
			fontName = attrs["name"]
			topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
			topDict.charset = None  # gets filled in later
			self.fontNames.append(fontName)
			self.topDictIndex.append(topDict)
			for element in content:
				if isinstance(element, basestring):
					continue  # skip whitespace/text nodes
				name, attrs, content = element
				topDict.fromXML(name, attrs, content)
		elif name == "GlobalSubrs":
			for element in content:
				if isinstance(element, basestring):
					continue  # skip whitespace/text nodes
				name, attrs, content = element
				subr = psCharStrings.T2CharString()
				subr.fromXML(name, attrs, content)
				self.GlobalSubrs.append(subr)
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CFFWriter(object):

	"""Accumulates the top-level pieces of a CFF table and writes them out.

	Offsets stored inside the data depend on the sizes of the pieces,
	and the sizes in turn depend on how many bytes the offsets need, so
	toFile() iterates until the offset list reaches a fixed point."""

	def __init__(self):
		self.data = []

	def add(self, table):
		# 'table' is either a bytes object or a compiler object exposing
		# getDataLength()/toFile() (and optionally setPos()).
		self.data.append(table)

	def toFile(self, file):
		"""Resolve all offsets, then write every piece to *file*."""
		lastPosList = None
		count = 1
		while True:
			log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
			count = count + 1
			pos = 0
			posList = [pos]
			for item in self.data:
				if hasattr(item, "getDataLength"):
					endPos = pos + item.getDataLength()
				else:
					endPos = pos + len(item)
				if hasattr(item, "setPos"):
					# Let the item record its position in its parent dict;
					# this may change encoded sizes on the next pass.
					item.setPos(pos, endPos)
				pos = endPos
				posList.append(pos)
			if posList == lastPosList:
				# Fixed point reached: positions are stable.
				break
			lastPosList = posList
		log.log(DEBUG, "CFFWriter.toFile() writing to file.")
		begin = file.tell()
		posList = [0]
		for item in self.data:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)
			posList.append(file.tell() - begin)
		# Sanity check: what we wrote matches the computed layout.
		assert posList == lastPosList
|
|
|
|
def calcOffSize(largestOffset):
	"""Return the number of bytes (1-4) needed to encode *largestOffset*
	as a CFF offset."""
	for size, limit in ((1, 0x100), (2, 0x10000), (3, 0x1000000)):
		if largestOffset < limit:
			return size
	return 4
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class IndexCompiler(object):

	"""Base compiler for a CFF INDEX structure: count, offSize, the
	offset array, and the concatenated object data."""

	def __init__(self, items, strings, parent):
		# 'parent' is the compiler of the enclosing object; subclasses
		# use it to patch their position into the parent's raw dict.
		self.items = self.getItems(items, strings)
		self.parent = parent

	def getItems(self, items, strings):
		# Hook for subclasses to convert/compile the raw items.
		return items

	def getOffsets(self):
		"""Compute the (1-based, per the CFF spec) offset of each item."""
		pos = 1
		offsets = [pos]
		for item in self.items:
			if hasattr(item, "getDataLength"):
				pos = pos + item.getDataLength()
			else:
				pos = pos + len(item)
			offsets.append(pos)
		return offsets

	def getDataLength(self):
		"""Total byte size of this INDEX as it will appear on disk."""
		lastOffset = self.getOffsets()[-1]
		offSize = calcOffSize(lastOffset)
		dataLength = (
			2 +  # count
			1 +  # offSize
			(len(self.items) + 1) * offSize +  # the offsets
			lastOffset - 1  # size of object data
		)
		return dataLength

	def toFile(self, file):
		"""Write the INDEX to *file*."""
		offsets = self.getOffsets()
		writeCard16(file, len(self.items))
		offSize = calcOffSize(offsets[-1])
		writeCard8(file, offSize)
		offSize = -offSize
		pack = struct.pack
		for offset in offsets:
			# Pack as 4 big-endian bytes, keep only the low offSize bytes.
			binOffset = pack(">l", offset)[offSize:]
			assert len(binOffset) == -offSize
			file.write(binOffset)
		for item in self.items:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(tobytes(item, encoding="latin1"))
|
|
|
class IndexedStringsCompiler(IndexCompiler):

	"""Compiler for the String INDEX."""

	def getItems(self, items, strings):
		# 'items' is an IndexedStrings object; its .strings attribute
		# holds the raw list to be written.
		return items.strings
|
|
|
class TopDictIndexCompiler(IndexCompiler):

	"""Compiler for the TopDict INDEX."""

	def getItems(self, items, strings):
		"""Wrap every top dict in its own compiler object."""
		return [topDict.getCompiler(strings, self) for topDict in items]

	def getChildren(self, strings):
		"""Collect the dependent compilers of all top dicts, in order."""
		children = []
		for dictCompiler in self.items:
			children.extend(dictCompiler.getChildren(strings))
		return children
|
2003-08-22 19:53:32 +00:00
|
|
|
class FDArrayIndexCompiler(IndexCompiler):

	"""Compiler for the FDArray INDEX of a CID-keyed font."""

	def getItems(self, items, strings):
		# Wrap each font dict in its own compiler.
		out = []
		for item in items:
			out.append(item.getCompiler(strings, self))
		return out

	def getChildren(self, strings):
		# Gather the dependent compilers of every font dict, in order.
		children = []
		for fontDict in self.items:
			children.extend(fontDict.getChildren(strings))
		return children

	def toFile(self, file):
		# NOTE(review): this mirrors IndexCompiler.toFile() except that
		# plain items are written as-is instead of latin1-encoded.
		offsets = self.getOffsets()
		writeCard16(file, len(self.items))
		offSize = calcOffSize(offsets[-1])
		writeCard8(file, offSize)
		offSize = -offSize
		pack = struct.pack
		for offset in offsets:
			# Pack as 4 big-endian bytes, keep only the low offSize bytes.
			binOffset = pack(">l", offset)[offSize:]
			assert len(binOffset) == -offSize
			file.write(binOffset)
		for item in self.items:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)

	def setPos(self, pos, endPos):
		# Record our absolute offset under "FDArray" in the parent top dict.
		self.parent.rawDict["FDArray"] = pos
2002-05-23 21:50:36 +00:00
|
|
|
class GlobalSubrsCompiler(IndexCompiler):

	"""Compiler for the Global Subrs INDEX: items are charstrings, and the
	INDEX entries are their compiled bytecode."""

	def getItems(self, items, strings):
		bytecodes = []
		for charString in items:
			charString.compile()
			bytecodes.append(charString.bytecode)
		return bytecodes
|
|
class SubrsCompiler(GlobalSubrsCompiler):

	"""Compiler for a local Subrs INDEX inside a Private dict."""

	def setPos(self, pos, endPos):
		# The Subrs operand is stored relative to the Private dict's
		# own position.
		self.parent.rawDict["Subrs"] = pos - self.parent.pos
|
|
class CharStringsCompiler(GlobalSubrsCompiler):

	"""Compiler for the CharStrings INDEX."""

	def setPos(self, pos, endPos):
		# The CharStrings operand in the top dict is an absolute position.
		self.parent.rawDict["CharStrings"] = pos
|
2013-11-28 14:26:58 -05:00
|
|
|
class Index(object):

	"""This class represents what the CFF spec calls an INDEX.

	Items are read lazily: only the offset table is parsed up front,
	and each item's data is fetched (and cached) on first access."""

	compilerClass = IndexCompiler

	def __init__(self, file=None):
		self.items = []
		name = self.__class__.__name__
		if file is None:
			# Building from scratch (e.g. from XML); start empty.
			return
		log.log(DEBUG, "loading %s at %s", name, file.tell())
		self.file = file
		count = readCard16(file)
		if count == 0:
			# An empty INDEX is just the 2-byte count.
			return
		# Placeholders; real items are produced lazily in __getitem__.
		self.items = [None] * count
		offSize = readCard8(file)
		log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
		assert offSize <= 4, "offSize too large: %s" % offSize
		self.offsets = offsets = []
		# Left-pad each offSize-byte offset to 4 bytes so ">L" can unpack it.
		pad = b'\0' * (4 - offSize)
		for index in range(count+1):
			chunk = file.read(offSize)
			chunk = pad + chunk
			offset, = struct.unpack(">L", chunk)
			offsets.append(int(offset))
		# Offsets are 1-based, so the object data effectively starts one
		# byte before the current file position.
		self.offsetBase = file.tell() - 1
		file.seek(self.offsetBase + offsets[-1])  # pretend we've read the whole lot
		log.log(DEBUG, " end of %s at %s", name, file.tell())

	def __len__(self):
		return len(self.items)

	def __getitem__(self, index):
		"""Return item *index*, reading and producing it on first access."""
		item = self.items[index]
		if item is not None:
			return item
		offset = self.offsets[index] + self.offsetBase
		size = self.offsets[index+1] - self.offsets[index]
		file = self.file
		file.seek(offset)
		data = file.read(size)
		assert len(data) == size
		item = self.produceItem(index, data, file, offset, size)
		self.items[index] = item  # cache for subsequent accesses
		return item

	def produceItem(self, index, data, file, offset, size):
		# Hook for subclasses; the base class returns the raw bytes.
		return data

	def append(self, item):
		self.items.append(item)

	def getCompiler(self, strings, parent):
		return self.compilerClass(self, strings, parent)
2002-05-23 21:50:36 +00:00
|
|
|
class GlobalSubrsIndex(Index):

	"""INDEX of charstrings (used for global subrs, and as the base for
	local subrs and the CharStrings INDEX)."""

	compilerClass = GlobalSubrsCompiler

	def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None):
		Index.__init__(self, file)
		self.globalSubrs = globalSubrs
		self.private = private
		# fdSelect/fdArray are only set for CID-keyed fonts; their mere
		# presence is tested with hasattr() elsewhere, so only assign
		# them when given.
		if fdSelect:
			self.fdSelect = fdSelect
		if fdArray:
			self.fdArray = fdArray

	def produceItem(self, index, data, file, offset, size):
		# Pick the Private dict the charstring should resolve subrs
		# against: an explicit one, or the per-glyph FD's, or none.
		if self.private is not None:
			private = self.private
		elif hasattr(self, 'fdArray') and self.fdArray is not None:
			private = self.fdArray[self.fdSelect[index]].Private
		else:
			private = None
		return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs)

	def toXML(self, xmlWriter, progress):
		"""Dump each subr as a <CharString index=i> element."""
		xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.")
		xmlWriter.newline()
		for i in range(len(self)):
			subr = self[i]
			if subr.needsDecompilation():
				# Still raw bytecode; mark it so the dump is identifiable.
				xmlWriter.begintag("CharString", index=i, raw=1)
			else:
				xmlWriter.begintag("CharString", index=i)
			xmlWriter.newline()
			subr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()

	def fromXML(self, name, attrs, content):
		if name != "CharString":
			return
		subr = psCharStrings.T2CharString()
		subr.fromXML(name, attrs, content)
		self.append(subr)

	def getItemAndSelector(self, index):
		"""Return (charstring, FD selector or None) for *index*."""
		sel = None
		if hasattr(self, 'fdSelect'):
			sel = self.fdSelect[index]
		return self[index], sel
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class SubrsIndex(GlobalSubrsIndex):

	"""Local Subrs INDEX; differs from the global one only in how its
	offset is recorded at compile time (relative to the Private dict)."""

	compilerClass = SubrsCompiler
|
|
2002-05-17 07:06:32 +00:00
|
|
|
class TopDictIndex(Index):

	"""INDEX of TopDict objects.  The .strings and .GlobalSubrs
	attributes are attached by the owner (CFFFontSet) after construction
	and before items are accessed."""

	compilerClass = TopDictIndexCompiler

	def produceItem(self, index, data, file, offset, size):
		# Lazily decompile the raw dict data into a TopDict.
		top = TopDict(self.strings, file, offset, self.GlobalSubrs)
		top.decompile(data)
		return top

	def toXML(self, xmlWriter, progress):
		"""Dump every dict as a <FontDict index=i> element."""
		for i in range(len(self)):
			xmlWriter.begintag("FontDict", index=i)
			xmlWriter.newline()
			self[i].toXML(xmlWriter, progress)
			xmlWriter.endtag("FontDict")
			xmlWriter.newline()
|
2003-08-22 19:53:32 +00:00
|
|
|
class FDArrayIndex(TopDictIndex):

	"""INDEX of FontDicts: the FDArray of a CID-keyed font."""

	compilerClass = FDArrayIndexCompiler

	def fromXML(self, name, attrs, content):
		"""Rebuild one FontDict from its <FontDict> XML element and
		append it to the array."""
		if name != "FontDict":
			return
		fontDict = FontDict()
		for element in content:
			# Skip whitespace/text nodes between child elements.
			if isinstance(element, basestring):
				continue
			fontDict.fromXML(*element)
		self.append(fontDict)
|
|
|
|
|
class FDSelect:

	"""Maps glyph ID -> font dict (FD) index for CID-keyed fonts.
	Supports the binary formats 0 (one byte per glyph) and 3 (ranges)."""

	def __init__(self, file=None, numGlyphs=None, format=None):
		if file:
			# read data in from file
			self.format = readCard8(file)
			if self.format == 0:
				# Format 0: one FD index byte per glyph.
				from array import array
				self.gidArray = array("B", file.read(numGlyphs)).tolist()
			elif self.format == 3:
				# Format 3: nRanges of (first glyph, fd) pairs, terminated
				# by a sentinel glyph ID.  Expand into a flat per-glyph list.
				gidArray = [None] * numGlyphs
				nRanges = readCard16(file)
				fd = None
				prev = None
				for i in range(nRanges):
					first = readCard16(file)
					if prev is not None:
						# Fill the previous range [prev, first) with its fd.
						for glyphID in range(prev, first):
							gidArray[glyphID] = fd
					prev = first
					fd = readCard8(file)
				if prev is not None:
					# The sentinel gives the end of the last range.
					first = readCard16(file)
					for glyphID in range(prev, first):
						gidArray[glyphID] = fd
				self.gidArray = gidArray
			else:
				assert False, "unsupported FDSelect format: %s" % format
		else:
			# reading from XML. Make empty gidArray, and leave format as passed in.
			# format is None will result in the smallest representation being used.
			self.format = format
			self.gidArray = []

	def __len__(self):
		return len(self.gidArray)

	def __getitem__(self, index):
		return self.gidArray[index]

	def __setitem__(self, index, fdSelectValue):
		self.gidArray[index] = fdSelectValue

	def append(self, fdSelectValue):
		self.gidArray.append(fdSelectValue)
|
2013-11-28 14:26:58 -05:00
|
|
|
class CharStrings(object):

	"""Dict-like access to the font's charstrings, keyed by glyph name.

	Two storage modes: when loaded from a file, self.charStrings maps
	name -> index into self.charStringsIndex (lazy decompilation); when
	built from XML, it maps name -> charstring object directly.  The
	flag self.charStringsAreIndexed records which mode is active."""

	def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray):
		if file is not None:
			self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray)
			self.charStrings = charStrings = {}
			for i in range(len(charset)):
				charStrings[charset[i]] = i
			self.charStringsAreIndexed = 1
		else:
			self.charStrings = {}
			self.charStringsAreIndexed = 0
		self.globalSubrs = globalSubrs
		self.private = private
		# fdSelect/fdArray are only present for CID-keyed fonts; their
		# presence is tested with hasattr() below.
		if fdSelect is not None:
			self.fdSelect = fdSelect
		if fdArray is not None:
			self.fdArray = fdArray

	def keys(self):
		return list(self.charStrings.keys())

	def values(self):
		if self.charStringsAreIndexed:
			return self.charStringsIndex
		else:
			return list(self.charStrings.values())

	def has_key(self, name):
		return name in self.charStrings

	__contains__ = has_key

	def __len__(self):
		return len(self.charStrings)

	def __getitem__(self, name):
		charString = self.charStrings[name]
		if self.charStringsAreIndexed:
			# Resolve the stored index to the actual charstring.
			charString = self.charStringsIndex[charString]
		return charString

	def __setitem__(self, name, charString):
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			self.charStringsIndex[index] = charString
		else:
			self.charStrings[name] = charString

	def getItemAndSelector(self, name):
		"""Return (charstring, FD selector index or None) for *name*."""
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			return self.charStringsIndex.getItemAndSelector(index)
		else:
			if hasattr(self, 'fdArray'):
				if hasattr(self, 'fdSelect'):
					sel = self.charStrings[name].fdSelectIndex
				else:
					raise KeyError("fdSelect array not yet defined.")
			else:
				sel = None
			return self.charStrings[name], sel

	def toXML(self, xmlWriter, progress):
		"""Dump every charstring as a <CharString name=...> element,
		updating *progress* every 'step' glyphs if provided."""
		names = sorted(self.keys())
		i = 0
		step = 10
		numGlyphs = len(names)
		for name in names:
			charStr, fdSelectIndex = self.getItemAndSelector(name)
			if charStr.needsDecompilation():
				# Still raw bytecode; mark the dump accordingly.
				raw = [("raw", 1)]
			else:
				raw = []
			if fdSelectIndex is None:
				xmlWriter.begintag("CharString", [('name', name)] + raw)
			else:
				xmlWriter.begintag("CharString",
						[('name', name), ('fdSelectIndex', fdSelectIndex)] + raw)
			xmlWriter.newline()
			charStr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
			if not i % step and progress is not None:
				progress.setLabel("Dumping 'CFF ' table... (%s)" % name)
				progress.increment(step / numGlyphs)
			i = i + 1

	def fromXML(self, name, attrs, content):
		"""Rebuild charstrings from the <CharString> children of *content*."""
		for element in content:
			if isinstance(element, basestring):
				continue  # skip whitespace/text nodes
			name, attrs, content = element
			if name != "CharString":
				continue
			fdID = -1
			if hasattr(self, "fdArray"):
				# CID-keyed: pick the Private dict of the glyph's FD.
				fdID = safeEval(attrs["fdSelectIndex"])
				private = self.fdArray[fdID].Private
			else:
				private = self.private

			glyphName = attrs["name"]
			charString = psCharStrings.T2CharString(
					private=private,
					globalSubrs=self.globalSubrs)
			charString.fromXML(name, attrs, content)
			if fdID >= 0:
				charString.fdSelectIndex = fdID
			self[glyphName] = charString
|
|
2002-05-17 18:36:07 +00:00
|
|
|
def readCard8(file):
	"""Read a Card8 (one unsigned byte) from *file* and return it as an int."""
	return byteord(file.read(1))
|
|
|
def readCard16(file):
	"""Read a Card16 (big-endian unsigned 16-bit integer) from *file*."""
	return struct.unpack(">H", file.read(2))[0]
2002-05-23 21:50:36 +00:00
|
|
|
def writeCard8(file, value):
	"""Write *value* to *file* as a Card8 (one unsigned byte)."""
	file.write(bytechr(value))
|
|
|
def writeCard16(file, value):
	"""Write *value* to *file* as a Card16 (big-endian unsigned 16-bit)."""
	packed = struct.pack(">H", value)
	file.write(packed)
|
|
|
def packCard8(value):
	"""Return *value* packed as a Card8 (one unsigned byte)."""
	return bytechr(value)
|
|
|
|
def packCard16(value):
	"""Return *value* packed as a Card16 (big-endian unsigned 16-bit)."""
	return struct.pack(">H", value)
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildOperatorDict(table):
	"""Map operator code -> (name, argument type) from an operator spec table."""
	return {op: (name, arg) for op, name, arg, default, conv in table}
|
2002-05-23 21:50:36 +00:00
|
|
|
def buildOpcodeDict(table):
	"""Map operator name -> (binary opcode, argument type).

	Two-byte (escaped) operators are given as tuples in the spec table."""
	d = {}
	for op, name, arg, default, conv in table:
		if isinstance(op, tuple):
			opcode = bytechr(op[0]) + bytechr(op[1])
		else:
			opcode = bytechr(op)
		d[name] = (opcode, arg)
	return d
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildOrder(table):
	"""Return the operator names of *table*, in table order."""
	return [name for op, name, arg, default, conv in table]
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildDefaults(table):
	"""Map operator name -> default value, skipping entries without one."""
	return {name: default
			for op, name, arg, default, conv in table
			if default is not None}
|
|
|
|
def buildConverters(table):
	"""Map operator name -> converter object from an operator spec table."""
	return {name: conv for op, name, arg, default, conv in table}
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class SimpleConverter(object):

	"""Base converter for dict values: passes values through unchanged
	and serializes them as a single 'value' XML attribute."""

	def read(self, parent, value):
		# Binary -> Python: identity.
		return value

	def write(self, parent, value):
		# Python -> binary: identity.
		return value

	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, value=value)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return attrs["value"]
|
|
2013-11-28 07:10:53 -05:00
|
|
|
class ASCIIConverter(SimpleConverter):

	"""Converter for values stored as ASCII byte strings."""

	def read(self, parent, value):
		return tostr(value, encoding='ascii')

	def write(self, parent, value):
		return tobytes(value, encoding='ascii')

	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii"))
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return tobytes(attrs["value"], encoding=("ascii"))
|
2002-09-09 14:18:39 +00:00
|
|
|
class Latin1Converter(SimpleConverter):

	"""Converter for values stored as Latin-1 byte strings."""

	def read(self, parent, value):
		return tostr(value, encoding='latin1')

	def write(self, parent, value):
		return tobytes(value, encoding='latin1')

	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1"))
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return tobytes(attrs["value"], encoding=("latin1"))
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def parseNum(s):
	"""Parse a numeric string, returning an int when possible, else a float.

	Raises ValueError if *s* is not a valid number at all.
	"""
	try:
		value = int(s)
	# Was a bare 'except:', which also swallowed KeyboardInterrupt and
	# SystemExit; only conversion failures should fall through to float.
	except (ValueError, TypeError):
		value = float(s)
	return value
|
|
|
|
class NumberConverter(SimpleConverter):

	"""Converter for scalar numeric dict values."""

	def xmlRead(self, name, attrs, content, parent):
		# The XML attribute is text; parse back to int or float.
		return parseNum(attrs["value"])
|
|
|
class ArrayConverter(SimpleConverter):

	"""Converter for lists of numbers, serialized as one space-separated
	'value' attribute."""

	def xmlWrite(self, xmlWriter, name, value, progress):
		numbers = " ".join(map(str, value))
		xmlWriter.simpletag(name, value=numbers)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return [parseNum(chunk) for chunk in attrs["value"].split()]
|
|
|
class TableConverter(SimpleConverter):
	"""Base converter for operands that are objects with toXML/fromXML."""

	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.begintag(name)
		xmlWriter.newline()
		# The wrapped object renders its own element body.
		value.toXML(xmlWriter, progress)
		xmlWriter.endtag(name)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		ob = self.getClass()()
		for element in content:
			# Skip character data between child elements.
			if isinstance(element, basestring):
				continue
			childName, childAttrs, childContent = element
			ob.fromXML(childName, childAttrs, childContent)
		return ob
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class PrivateDictConverter(TableConverter):
	"""Converter for the Private dict; its operand is a (size, offset) pair."""

	def getClass(self):
		return PrivateDict

	def read(self, parent, value):
		size, offset = value
		fileObj = parent.file
		priv = PrivateDict(parent.strings, fileObj, offset)
		fileObj.seek(offset)
		raw = fileObj.read(size)
		assert len(raw) == size
		priv.decompile(raw)
		return priv

	def write(self, parent, value):
		# The real (size, offset) pair is patched in by the compiler later.
		return (0, 0)  # dummy value
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class SubrsConverter(TableConverter):
	"""Converter for local Subrs; the operand is relative to the Private dict."""

	def getClass(self):
		return SubrsIndex

	def read(self, parent, value):
		fileObj = parent.file
		# The Subrs offset is relative to the Private dict's own offset.
		fileObj.seek(parent.offset + value)  # Offset(self)
		return SubrsIndex(fileObj)

	def write(self, parent, value):
		# The real offset is patched in by the compiler later.
		return 0  # dummy value
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class CharStringsConverter(TableConverter):
	# Converter for the CharStrings operand: an absolute file offset to the
	# CharStrings INDEX.

	def read(self, parent, value):
		# 'value' is the absolute offset of the CharStrings INDEX in 'file'.
		file = parent.file
		charset = parent.charset
		globalSubrs = parent.GlobalSubrs
		if hasattr(parent, "ROS"):
			# CID-keyed font: per-glyph private data comes from FDArray/FDSelect.
			fdSelect, fdArray = parent.FDSelect, parent.FDArray
			private = None
		else:
			# Name-keyed font: a single Private dict on the top dict.
			fdSelect, fdArray = None, None
			private = parent.Private
		file.seek(value)  # Offset(0)
		return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray)

	def write(self, parent, value):
		# The real offset is patched in by the compiler later.
		return 0  # dummy value

	def xmlRead(self, name, attrs, content, parent):
		if hasattr(parent, "ROS"):
			# if it is a CID-keyed font, then the private Dict is extracted from the parent.FDArray
			private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray
		else:
			# if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray.
			private, fdSelect, fdArray = parent.Private, None, None
		charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray)
		charStrings.fromXML(name, attrs, content)
		return charStrings
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CharsetConverter(object):
	# Converter for the charset operand. Values 0-2 select a predefined
	# charset; any larger value is an absolute offset to charset data.
	# NOTE: the local 'format' was renamed to 'fmt' for consistency with the
	# rest of this module and to stop shadowing the builtin.

	def read(self, parent, value):
		isCID = hasattr(parent, "ROS")
		if value > 2:
			numGlyphs = parent.numGlyphs
			file = parent.file
			file.seek(value)
			log.log(DEBUG, "loading charset at %s", value)
			fmt = readCard8(file)
			if fmt == 0:
				charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
			elif fmt == 1 or fmt == 2:
				charset = parseCharset(numGlyphs, file, parent.strings, isCID, fmt)
			else:
				raise NotImplementedError
			assert len(charset) == numGlyphs
			log.log(DEBUG, " charset end at %s", file.tell())
		else:  # offset == 0 -> no charset data.
			if isCID or "CharStrings" not in parent.rawDict:
				# We get here only when processing fontDicts from the FDArray
				# of CFF-CID fonts. Only the real topDict references the charset.
				assert value == 0
				charset = None
			elif value == 0:
				charset = cffISOAdobeStrings
			elif value == 1:
				charset = cffIExpertStrings
			elif value == 2:
				charset = cffExpertSubsetStrings
		return charset

	def write(self, parent, value):
		# The real offset is patched in by CharsetCompiler.setPos() later.
		return 0  # dummy value

	def xmlWrite(self, xmlWriter, name, value, progress):
		# XXX only write charset when not in OT/TTX context, where we
		# dump charset as a separate "GlyphOrder" table.
		##xmlWriter.simpletag("charset")
		xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		# The charset is rebuilt from the separate GlyphOrder element, so
		# there is nothing to read here.  (A dead 'if 0:' branch calling
		# safeEval was removed; this method has always returned None.)
		pass
|
1999-12-16 21:34:53 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CharsetCompiler(object):
	"""Compiles a charset list to binary, picking the smaller of format 0
	and format 1/2 encodings."""

	def __init__(self, strings, charset, parent):
		assert charset[0] == '.notdef'
		isCID = hasattr(parent.dictObj, "ROS")
		flat = packCharset0(charset, isCID, strings)
		ranged = packCharset(charset, isCID, strings)
		# Keep whichever encoding is more compact; ties go to format 0.
		self.data = ranged if len(ranged) < len(flat) else flat
		self.parent = parent

	def setPos(self, pos, endPos):
		# Record the final file offset in the owning dict.
		self.parent.rawDict["charset"] = pos

	def getDataLength(self):
		return len(self.data)

	def toFile(self, file):
		file.write(self.data)
|
|
|
|
|
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
def getCIDfromName(name, strings):
	"""Return the CID encoded in a 'cidNNNNN'-style glyph name.

	The *strings* argument is unused; it exists so this function is
	interchangeable with getSIDfromName.
	"""
	return int(name[3:], 10)
|
|
|
|
|
|
|
|
def getSIDfromName(name, strings):
	"""Look up (or register) *name* in the string index; return its SID."""
	sid = strings.getSID(name)
	return sid
|
|
|
|
|
|
|
|
def packCharset0(charset, isCID, strings):
	"""Pack *charset* (glyph-name list, '.notdef' first) as a format 0 table:
	one Card16 SID/CID per glyph after '.notdef'."""
	getNameID = getCIDfromName if isCID else getSIDfromName
	chunks = [packCard8(0)]
	for glyphName in charset[1:]:
		chunks.append(packCard16(getNameID(glyphName, strings)))
	return bytesjoin(chunks)
|
2002-05-24 10:35:13 +00:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
def packCharset(charset, isCID, strings):
	# Pack *charset* as a format 1 or 2 table: runs of consecutive SIDs/CIDs
	# stored as (first, nLeft) ranges. Format 2 (16-bit nLeft) is used only
	# when some run covers more than 256 glyphs.
	fmt = 1
	ranges = []
	first = None
	end = 0
	if isCID:
		getNameID = getCIDfromName
	else:
		getNameID = getSIDfromName

	for name in charset[1:]:
		SID = getNameID(name, strings)
		if first is None:
			first = SID
		elif end + 1 != SID:
			# Run broken: flush the previous range.
			nLeft = end - first
			if nLeft > 255:
				fmt = 2
			ranges.append((first, nLeft))
			first = SID
		end = SID
	if end:
		# Flush the final range (skipped when charset holds only '.notdef').
		nLeft = end - first
		if nLeft > 255:
			fmt = 2
		ranges.append((first, nLeft))

	data = [packCard8(fmt)]
	if fmt == 1:
		nLeftFunc = packCard8
	else:
		nLeftFunc = packCard16
	for first, nLeft in ranges:
		data.append(packCard16(first) + nLeftFunc(nLeft))
	return bytesjoin(data)
|
2002-05-24 10:35:13 +00:00
|
|
|
|
2006-10-21 13:41:18 +00:00
|
|
|
def parseCharset0(numGlyphs, file, strings, isCID):
	"""Parse a format 0 charset: one Card16 per glyph after '.notdef'.

	Returns a list of glyph names ('cidNNNNN' names for CID-keyed fonts).
	"""
	charset = [".notdef"]
	if isCID:
		for _ in range(numGlyphs - 1):
			cid = readCard16(file)
			charset.append("cid" + str(cid).zfill(5))
	else:
		for _ in range(numGlyphs - 1):
			sid = readCard16(file)
			charset.append(strings[sid])
	return charset
|
|
|
|
|
2013-12-04 01:15:46 -05:00
|
|
|
def parseCharset(numGlyphs, file, strings, isCID, fmt):
	# Parse a format 1 or 2 charset: (first, nLeft) ranges of consecutive
	# SIDs/CIDs.  Format 2 differs only in using a 16-bit nLeft field.
	charset = ['.notdef']
	count = 1
	if fmt == 1:
		nLeftFunc = readCard8
	else:
		nLeftFunc = readCard16
	while count < numGlyphs:
		first = readCard16(file)
		nLeft = nLeftFunc(file)
		if isCID:
			for CID in range(first, first+nLeft+1):
				charset.append("cid" + str(CID).zfill(5))
		else:
			for SID in range(first, first+nLeft+1):
				charset.append(strings[SID])
		# Each range contributes nLeft+1 glyphs.
		count = count + nLeft + 1
	return charset
|
1999-12-16 21:34:53 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class EncodingCompiler(object):
	"""Compiles a custom encoding list, picking the smaller of format 0/1."""

	def __init__(self, strings, encoding, parent):
		# Named standard encodings never reach this compiler.
		assert not isinstance(encoding, basestring)
		flat = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
		ranged = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
		# Ties go to format 1.
		self.data = flat if len(flat) < len(ranged) else ranged
		self.parent = parent

	def setPos(self, pos, endPos):
		self.parent.rawDict["Encoding"] = pos

	def getDataLength(self):
		return len(self.data)

	def toFile(self, file):
		file.write(self.data)
|
|
|
|
|
|
|
|
|
|
|
|
class EncodingConverter(SimpleConverter):
	# Converter for the Encoding operand. Values 0/1 select the standard
	# encodings; any larger value is an absolute offset to encoding data.

	def read(self, parent, value):
		if value == 0:
			return "StandardEncoding"
		elif value == 1:
			return "ExpertEncoding"
		else:
			assert value > 1
			file = parent.file
			file.seek(value)
			log.log(DEBUG, "loading Encoding at %s", value)
			fmt = readCard8(file)
			# High bit of the format byte flags a supplement block.
			haveSupplement = fmt & 0x80
			if haveSupplement:
				raise NotImplementedError("Encoding supplements are not yet supported")
			fmt = fmt & 0x7f
			# NOTE(review): 'encoding' is only bound for fmt 0 or 1; any
			# other masked format would raise NameError here — confirm that
			# is the intended failure mode for malformed data.
			if fmt == 0:
				encoding = parseEncoding0(parent.charset, file, haveSupplement,
						parent.strings)
			elif fmt == 1:
				encoding = parseEncoding1(parent.charset, file, haveSupplement,
						parent.strings)
			return encoding

	def write(self, parent, value):
		# Standard encodings are stored inline as 0/1; custom encodings get
		# a real offset patched in by EncodingCompiler.setPos() later.
		if value == "StandardEncoding":
			return 0
		elif value == "ExpertEncoding":
			return 1
		return 0  # dummy value

	def xmlWrite(self, xmlWriter, name, value, progress):
		if value in ("StandardEncoding", "ExpertEncoding"):
			xmlWriter.simpletag(name, name=value)
			xmlWriter.newline()
			return
		xmlWriter.begintag(name)
		xmlWriter.newline()
		# Only emit encoded glyphs; '.notdef' slots are implicit.
		for code in range(len(value)):
			glyphName = value[code]
			if glyphName != ".notdef":
				xmlWriter.simpletag("map", code=hex(code), name=glyphName)
				xmlWriter.newline()
		xmlWriter.endtag(name)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		# A 'name' attribute means one of the standard encodings.
		if "name" in attrs:
			return attrs["name"]
		encoding = [".notdef"] * 256
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			code = safeEval(attrs["code"])
			glyphName = attrs["name"]
			encoding[code] = glyphName
		return encoding
|
|
|
|
|
|
|
|
|
|
|
|
def parseEncoding0(charset, file, haveSupplement, strings):
	"""Parse a format 0 Encoding: an explicit code byte for each of the
	first nCodes glyphs after '.notdef'."""
	nCodes = readCard8(file)
	encoding = [".notdef"] * 256
	for gid in range(1, nCodes + 1):
		code = readCard8(file)
		# Code 0 means the glyph is unencoded.
		if code:
			encoding[code] = charset[gid]
	return encoding
|
|
|
|
|
|
|
|
def parseEncoding1(charset, file, haveSupplement, strings):
	# Parse a format 1 Encoding: (first code, nLeft) ranges.  Glyph IDs are
	# assigned sequentially starting at 1 (glyph 0 is always '.notdef').
	nRanges = readCard8(file)
	encoding = [".notdef"] * 256
	glyphID = 1
	for i in range(nRanges):
		code = readCard8(file)
		nLeft = readCard8(file)
		# The inner loop rebinds 'glyphID'; after it finishes, glyphID holds
		# the last GID of this range, so the +1 below moves to the next one.
		for glyphID in range(glyphID, glyphID + nLeft + 1):
			encoding[code] = charset[glyphID]
			code = code + 1
		glyphID = glyphID + 1
	return encoding
|
|
|
|
|
|
|
|
def packEncoding0(charset, encoding, strings):
	"""Pack *encoding* as a format 0 table: one code byte per glyph, in
	charset order, with trailing unencoded glyphs trimmed."""
	# Reverse map: glyph name -> code point.
	codeByName = {}
	for code, glyphName in enumerate(encoding):
		if glyphName != ".notdef":
			codeByName[glyphName] = code
	codes = [codeByName.get(glyphName) for glyphName in charset[1:]]

	# Trailing unencoded glyphs need not be written at all.
	while codes and codes[-1] is None:
		codes.pop()

	chunks = [packCard8(0), packCard8(len(codes))]
	for code in codes:
		chunks.append(packCard8(0 if code is None else code))
	return bytesjoin(chunks)
|
2003-01-03 20:56:01 +00:00
|
|
|
|
|
|
|
def packEncoding1(charset, encoding, strings):
	# Pack *encoding* as a format 1 table: (first code, nLeft) ranges over
	# the glyphs in charset order.  Unencoded glyphs get code -1, which
	# breaks runs; trailing unencoded ranges are dropped and any remaining
	# -1 is written out as code 0.
	# NOTE(review): assumes charset contains at least one glyph besides
	# '.notdef'; otherwise 'first' stays None and the subtraction below
	# raises — confirm callers guarantee this.
	fmt = 1
	m = {}
	for code in range(len(encoding)):
		name = encoding[code]
		if name != ".notdef":
			m[name] = code
	ranges = []
	first = None
	end = 0
	for name in charset[1:]:
		code = m.get(name, -1)
		if first is None:
			first = code
		elif end + 1 != code:
			# Run broken: flush the previous range.
			nLeft = end - first
			ranges.append((first, nLeft))
			first = code
		end = code
	# Flush the final range.
	nLeft = end - first
	ranges.append((first, nLeft))

	# remove unencoded glyphs at the end.
	while ranges and ranges[-1][0] == -1:
		ranges.pop()

	data = [packCard8(fmt), packCard8(len(ranges))]
	for first, nLeft in ranges:
		if first == -1:  # unencoded
			first = 0
		data.append(packCard8(first) + packCard8(nLeft))
	return bytesjoin(data)
|
2003-01-03 20:56:01 +00:00
|
|
|
|
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class FDArrayConverter(TableConverter):
	"""Converter for the FDArray operand: an offset to an INDEX of font dicts."""

	def read(self, parent, value):
		fileObj = parent.file
		fileObj.seek(value)
		fdArray = FDArrayIndex(fileObj)
		fdArray.strings = parent.strings
		fdArray.GlobalSubrs = parent.GlobalSubrs
		return fdArray

	def write(self, parent, value):
		# The real offset is patched in by the compiler later.
		return 0  # dummy value

	def xmlRead(self, name, attrs, content, parent):
		fdArray = FDArrayIndex()
		for element in content:
			if isinstance(element, basestring):
				continue
			childName, childAttrs, childContent = element
			fdArray.fromXML(childName, childAttrs, childContent)
		return fdArray
|
|
|
|
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class FDSelectConverter(object):
	"""Converter for the FDSelect operand of CID-keyed fonts."""

	def read(self, parent, value):
		fileObj = parent.file
		fileObj.seek(value)
		return FDSelect(fileObj, parent.numGlyphs)

	def write(self, parent, value):
		# The real offset is patched in by FDSelectCompiler.setPos() later.
		return 0  # dummy value

	# The FDSelect glyph data is written out to XML in the charstring keys,
	# so we write out only the format selector
	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, [('format', value.format)])
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		fmt = safeEval(attrs["format"])
		return FDSelect(None, None, fmt)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
def packFDSelect0(fdSelectArray):
	"""Pack an FDSelect array in format 0: one Card8 FD index per glyph."""
	chunks = [packCard8(0)]
	for fdIndex in fdSelectArray:
		chunks.append(packCard8(fdIndex))
	return bytesjoin(chunks)
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
def packFDSelect3(fdSelectArray):
	"""Pack an FDSelect array in format 3: runs of equal FD indices stored
	as (first GID, fd index) ranges, terminated by a sentinel GID.

	The sentinel is one past the last glyph ID, i.e. len(fdSelectArray).
	Computing it directly (instead of inside the loop, as before) also
	fixes a NameError when *fdSelectArray* is empty, and the unused
	'first'/'end' locals were dropped.
	"""
	fdRanges = []
	lastFDIndex = -1
	for gid, fdIndex in enumerate(fdSelectArray):
		# Start a new range whenever the FD index changes.
		if fdIndex != lastFDIndex:
			fdRanges.append([gid, fdIndex])
			lastFDIndex = fdIndex
	sentinelGID = len(fdSelectArray)

	data = [packCard8(3)]
	data.append(packCard16(len(fdRanges)))
	for fdRange in fdRanges:
		data.append(packCard16(fdRange[0]))
		data.append(packCard8(fdRange[1]))
	data.append(packCard16(sentinelGID))
	return bytesjoin(data)
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class FDSelectCompiler(object):
	# Compiles an FDSelect object to binary, honoring its requested format
	# or, for any other format value, choosing the smaller of 0 and 3 and
	# recording the choice back on the FDSelect object.

	def __init__(self, fdSelect, parent):
		fmt = fdSelect.format
		fdSelectArray = fdSelect.gidArray
		if fmt == 0:
			self.data = packFDSelect0(fdSelectArray)
		elif fmt == 3:
			self.data = packFDSelect3(fdSelectArray)
		else:
			# choose smaller of the two formats
			data0 = packFDSelect0(fdSelectArray)
			data3 = packFDSelect3(fdSelectArray)
			if len(data0) < len(data3):
				self.data = data0
				fdSelect.format = 0
			else:
				self.data = data3
				fdSelect.format = 3

		self.parent = parent

	def setPos(self, pos, endPos):
		# Record the final file offset in the owning (top) dict.
		self.parent.rawDict["FDSelect"] = pos

	def getDataLength(self):
		return len(self.data)

	def toFile(self, file):
		file.write(self.data)
|
2002-05-17 18:36:07 +00:00
|
|
|
|
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class ROSConverter(SimpleConverter):
	"""Converter for the ROS (Registry, Ordering, Supplement) operand."""

	def xmlWrite(self, xmlWriter, name, value, progress):
		registry, order, supplement = value
		attrList = [('Registry', tostr(registry)), ('Order', tostr(order)),
			('Supplement', supplement)]
		xmlWriter.simpletag(name, attrList)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement']))
|
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
# Top DICT operator table: (opcode, name, argument type, default, converter).
# Rows whose converter is None get a default one assigned by addConverters(),
# based on the argument type.
topDictOperators = [
#	opcode		name			argument type	default	converter
	((12, 30),	'ROS',	('SID', 'SID', 'number'),	None,	ROSConverter()),
	((12, 20),	'SyntheticBase',	'number',	None,	None),
	(0,	'version',	'SID',	None,	None),
	(1,	'Notice',	'SID',	None,	Latin1Converter()),
	((12, 0),	'Copyright',	'SID',	None,	Latin1Converter()),
	(2,	'FullName',	'SID',	None,	None),
	((12, 38),	'FontName',	'SID',	None,	None),
	(3,	'FamilyName',	'SID',	None,	None),
	(4,	'Weight',	'SID',	None,	None),
	((12, 1),	'isFixedPitch',	'number',	0,	None),
	((12, 2),	'ItalicAngle',	'number',	0,	None),
	((12, 3),	'UnderlinePosition',	'number',	None,	None),
	((12, 4),	'UnderlineThickness',	'number',	50,	None),
	((12, 5),	'PaintType',	'number',	0,	None),
	((12, 6),	'CharstringType',	'number',	2,	None),
	((12, 7),	'FontMatrix',	'array',	[0.001, 0, 0, 0.001, 0, 0],	None),
	(13,	'UniqueID',	'number',	None,	None),
	(5,	'FontBBox',	'array',	[0, 0, 0, 0],	None),
	((12, 8),	'StrokeWidth',	'number',	0,	None),
	(14,	'XUID',	'array',	None,	None),
	((12, 21),	'PostScript',	'SID',	None,	None),
	((12, 22),	'BaseFontName',	'SID',	None,	None),
	((12, 23),	'BaseFontBlend',	'delta',	None,	None),
	((12, 31),	'CIDFontVersion',	'number',	0,	None),
	((12, 32),	'CIDFontRevision',	'number',	0,	None),
	((12, 33),	'CIDFontType',	'number',	0,	None),
	((12, 34),	'CIDCount',	'number',	8720,	None),
	(15,	'charset',	'number',	0,	CharsetConverter()),
	((12, 35),	'UIDBase',	'number',	None,	None),
	(16,	'Encoding',	'number',	0,	EncodingConverter()),
	(18,	'Private',	('number', 'number'),	None,	PrivateDictConverter()),
	((12, 37),	'FDSelect',	'number',	None,	FDSelectConverter()),
	((12, 36),	'FDArray',	'number',	None,	FDArrayConverter()),
	(17,	'CharStrings',	'number',	None,	CharStringsConverter()),
]
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
# Note! FDSelect and FDArray must both precede CharStrings in the output XML build order,
|
|
|
|
# in order for the font to compile back from xml.
|
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
# Private DICT operator table: (opcode, name, argument type, default,
# converter). Rows whose converter is None get one assigned by addConverters().
privateDictOperators = [
#	opcode		name			argument type	default	converter
	(6,	'BlueValues',	'delta',	None,	None),
	(7,	'OtherBlues',	'delta',	None,	None),
	(8,	'FamilyBlues',	'delta',	None,	None),
	(9,	'FamilyOtherBlues',	'delta',	None,	None),
	((12, 9),	'BlueScale',	'number',	0.039625,	None),
	((12, 10),	'BlueShift',	'number',	7,	None),
	((12, 11),	'BlueFuzz',	'number',	1,	None),
	(10,	'StdHW',	'number',	None,	None),
	(11,	'StdVW',	'number',	None,	None),
	((12, 12),	'StemSnapH',	'delta',	None,	None),
	((12, 13),	'StemSnapV',	'delta',	None,	None),
	((12, 14),	'ForceBold',	'number',	0,	None),
	((12, 15),	'ForceBoldThreshold',	'number',	None,	None),  # deprecated
	((12, 16),	'lenIV',	'number',	None,	None),  # deprecated
	((12, 17),	'LanguageGroup',	'number',	0,	None),
	((12, 18),	'ExpansionFactor',	'number',	0.06,	None),
	((12, 19),	'initialRandomSeed',	'number',	0,	None),
	(20,	'defaultWidthX',	'number',	0,	None),
	(21,	'nominalWidthX',	'number',	0,	None),
	(19,	'Subrs',	'number',	None,	SubrsConverter()),
]
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def addConverters(table):
	"""Fill in, in place, a default converter for every operator-table row
	whose converter slot is still None, based on its argument type."""
	for i, (op, name, arg, default, conv) in enumerate(table):
		if conv is not None:
			continue
		if arg in ("delta", "array"):
			conv = ArrayConverter()
		elif arg == "number":
			conv = NumberConverter()
		elif arg == "SID":
			conv = ASCIIConverter()
		else:
			assert False
		table[i] = op, name, arg, default, conv
|
|
|
|
|
|
|
|
# Materialize default converters for both operator tables at import time.
addConverters(privateDictOperators)
addConverters(topDictOperators)
|
|
|
|
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class TopDictDecompiler(psCharStrings.DictDecompiler):
	# Decompiles Top DICT (and FDArray font dict) binary data; the operator
	# dict maps each opcode to its name and argument types.
	operators = buildOperatorDict(topDictOperators)
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2002-05-14 12:22:03 +00:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
	# Decompiles Private DICT binary data using the private operator table.
	operators = buildOperatorDict(privateDictOperators)
|
2002-05-14 12:22:03 +00:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class DictCompiler(object):
	# Base compiler for CFF DICT structures (Top DICT, Private DICT, font
	# dicts). At construction, collects every non-default attribute value
	# into self.rawDict (via each converter's write()); compile() then
	# serializes those values in the dict's fixed operator order.

	def __init__(self, dictObj, strings, parent):
		assert isinstance(strings, IndexedStrings)
		self.dictObj = dictObj
		self.strings = strings
		self.parent = parent
		rawDict = {}
		for name in dictObj.order:
			value = getattr(dictObj, name, None)
			if value is None:
				continue
			conv = dictObj.converters[name]
			value = conv.write(dictObj, value)
			# Values equal to the spec default need not be written at all.
			if value == dictObj.defaults.get(name):
				continue
			rawDict[name] = value
		self.rawDict = rawDict

	def setPos(self, pos, endPos):
		# Subclasses record their final offset here; the base dict has no
		# position of its own to record.
		pass

	def getDataLength(self):
		return len(self.compile("getDataLength"))

	def compile(self, reason):
		log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
		rawDict = self.rawDict
		data = []
		for name in self.dictObj.order:
			value = rawDict.get(name)
			if value is None:
				continue
			op, argType = self.opcodes[name]
			if isinstance(argType, tuple):
				# Multi-operand operator (e.g. ROS, Private): one arg_*
				# handler per operand.
				l = len(argType)
				assert len(value) == l, "value doesn't match arg type"
				for i in range(l):
					arg = argType[i]
					v = value[i]
					arghandler = getattr(self, "arg_" + arg)
					data.append(arghandler(v))
			else:
				arghandler = getattr(self, "arg_" + argType)
				data.append(arghandler(value))
			# In CFF DICT data, operands precede their operator.
			data.append(op)
		return bytesjoin(data)

	def toFile(self, file):
		file.write(self.compile("toFile"))

	def arg_number(self, num):
		# Single numeric operand.
		return encodeNumber(num)
	def arg_SID(self, s):
		# String operand: stored as its index in the string INDEX.
		return psCharStrings.encodeIntCFF(self.strings.getSID(s))
	def arg_array(self, value):
		# Array operand: the numbers are simply concatenated.
		data = []
		for num in value:
			data.append(encodeNumber(num))
		return bytesjoin(data)
	def arg_delta(self, value):
		# Delta operand: each element is stored as the difference from the
		# previous one.
		out = []
		last = 0
		for v in value:
			out.append(v - last)
			last = v
		data = []
		for num in out:
			data.append(encodeNumber(num))
		return bytesjoin(data)
|
2002-05-23 21:50:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
def encodeNumber(num):
	"""Encode *num* as CFF operand bytes: real-number form for floats,
	integer form otherwise."""
	if isinstance(num, float):
		return psCharStrings.encodeFloat(num)
	return psCharStrings.encodeIntCFF(num)
|
|
|
|
|
|
|
|
|
|
|
|
class TopDictCompiler(DictCompiler):
	# Compiler for the Top DICT. getChildren() assembles the compilers for
	# every structure the Top DICT points at (charset, Encoding, FDSelect,
	# CharStrings, FDArray, Private).

	opcodes = buildOpcodeDict(topDictOperators)

	def getChildren(self, strings):
		children = []
		if hasattr(self.dictObj, "charset") and self.dictObj.charset:
			children.append(CharsetCompiler(strings, self.dictObj.charset, self))
		if hasattr(self.dictObj, "Encoding"):
			encoding = self.dictObj.Encoding
			# Named standard encodings are stored inline, not as a table.
			if not isinstance(encoding, basestring):
				children.append(EncodingCompiler(strings, encoding, self))
		if hasattr(self.dictObj, "FDSelect"):
			# I have not yet supported merging a ttx CFF-CID font, as there are interesting
			# issues about merging the FDArrays. Here I assume that
			# either the font was read from XML, and the FDSelect indices are all
			# in the charstring data, or the FDSelect array is already fully defined.
			fdSelect = self.dictObj.FDSelect
			if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data
				charStrings = self.dictObj.CharStrings
				for name in self.dictObj.charset:
					fdSelect.append(charStrings[name].fdSelectIndex)
			fdSelectComp = FDSelectCompiler(fdSelect, self)
			children.append(fdSelectComp)
		if hasattr(self.dictObj, "CharStrings"):
			items = []
			charStrings = self.dictObj.CharStrings
			# Charstrings are compiled in charset (glyph) order.
			for name in self.dictObj.charset:
				items.append(charStrings[name])
			charStringsComp = CharStringsCompiler(items, strings, self)
			children.append(charStringsComp)
		if hasattr(self.dictObj, "FDArray"):
			# I have not yet supported merging a ttx CFF-CID font, as there are interesting
			# issues about merging the FDArrays. Here I assume that the FDArray info is correct
			# and complete.
			fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
			children.append(fdArrayIndexComp)
			children.extend(fdArrayIndexComp.getChildren(strings))
		if hasattr(self.dictObj, "Private"):
			privComp = self.dictObj.Private.getCompiler(strings, self)
			children.append(privComp)
			children.extend(privComp.getChildren(strings))
		return children
|
|
|
|
|
|
|
|
|
|
|
|
class FontDictCompiler(DictCompiler):

	"""Compiler for a FontDict entry of a CID font's FDArray; uses the
	TopDict operator set."""

	opcodes = buildOpcodeDict(topDictOperators)

	def getChildren(self, strings):
		"""Return the dependent compilers: the Private dict, if present,
		together with everything it depends on."""
		dependents = []
		if hasattr(self.dictObj, "Private"):
			privCompiler = self.dictObj.Private.getCompiler(strings, self)
			dependents.append(privCompiler)
			dependents.extend(privCompiler.getChildren(strings))
		return dependents
|
|
|
|
|
|
|
|
|
|
|
|
class PrivateDictCompiler(DictCompiler):

	"""Compiler for a Private dict; records its (size, offset) pair in the
	owning dict's raw data."""

	opcodes = buildOpcodeDict(privateDictOperators)

	def setPos(self, pos, endPos):
		# The parent (Top or Font) dict stores the Private entry as a
		# (size, offset) pair rather than a plain offset.
		self.parent.rawDict["Private"] = (endPos - pos, pos)
		self.pos = pos

	def getChildren(self, strings):
		"""Return the dependent compilers: just the local Subrs index, if any."""
		if not hasattr(self.dictObj, "Subrs"):
			return []
		return [self.dictObj.Subrs.getCompiler(strings, self)]
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class BaseDict(object):

	"""Common machinery for CFF dict objects (TopDict, FontDict, PrivateDict).

	Subclasses supply the class attributes 'defaults', 'converters', 'order',
	'decompilerClass' and 'compilerClass'; this base class turns the raw
	operator dict into lazily-converted attributes and drives XML round-trips.
	"""

	def __init__(self, strings=None, file=None, offset=None):
		self.rawDict = {}
		if offset is not None:
			log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
		self.file = file
		self.offset = offset
		self.strings = strings
		self.skipNames = []

	def decompile(self, data):
		"""Parse the binary dict data into self.rawDict via decompilerClass."""
		log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
		decompiler = self.decompilerClass(self.strings)
		decompiler.decompile(data)
		self.rawDict = decompiler.getDict()
		self.postDecompile()

	def postDecompile(self):
		# Hook for subclasses; the base class has nothing to do here.
		pass

	def getCompiler(self, strings, parent):
		return self.compilerClass(self, strings, parent)

	def __getattr__(self, name):
		# Only reached for attributes not yet set on the instance: look the
		# name up in the raw operator dict, then in the per-class defaults.
		value = self.rawDict.get(name)
		if value is None:
			value = self.defaults.get(name)
		if value is None:
			raise AttributeError(name)
		# Convert the raw value once and cache it as a real attribute, so
		# subsequent accesses bypass __getattr__ entirely.
		value = self.converters[name].read(self, value)
		setattr(self, name, value)
		return value

	def toXML(self, xmlWriter, progress):
		"""Write every present (non-None, non-skipped) entry, in table order."""
		for name in self.order:
			if name in self.skipNames:
				continue
			value = getattr(self, name, None)
			if value is None:
				continue
			self.converters[name].xmlWrite(xmlWriter, name, value, progress)

	def fromXML(self, name, attrs, content):
		"""Parse one XML element through its converter and store the result."""
		converted = self.converters[name].xmlRead(name, attrs, content, self)
		setattr(self, name, converted)
|
2002-05-14 12:22:03 +00:00
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class TopDict(BaseDict):

	"""The CFF Top dict: holds charset, Encoding, CharStrings and — for CID
	fonts — the ROS/FDArray/FDSelect entries, via the topDictOperators table."""

	defaults = buildDefaults(topDictOperators)
	converters = buildConverters(topDictOperators)
	order = buildOrder(topDictOperators)
	decompilerClass = TopDictDecompiler
	compilerClass = TopDictCompiler

	def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None):
		BaseDict.__init__(self, strings, file, offset)
		self.GlobalSubrs = GlobalSubrs

	def getGlyphOrder(self):
		# The charset doubles as the glyph order.
		return self.charset

	def postDecompile(self):
		"""After the dict itself is parsed, peek at the CharStrings index to
		learn the glyph count up front."""
		offset = self.rawDict.get("CharStrings")
		if offset is None:
			return
		# get the number of glyphs beforehand.
		self.file.seek(offset)
		self.numGlyphs = readCard16(self.file)

	def toXML(self, xmlWriter, progress):
		if hasattr(self, "CharStrings"):
			self.decompileAllCharStrings(progress)
		if hasattr(self, "ROS"):
			self.skipNames = ['Encoding']
		if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
			# these values have default values, but I only want them to show up
			# in CID fonts.
			# NOTE(review): when ROS is present but CharStrings is absent, this
			# assignment discards the ['Encoding'] entry set just above;
			# longstanding behavior, preserved as-is.
			self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType',
					'CIDCount']
		BaseDict.toXML(self, xmlWriter, progress)

	def decompileAllCharStrings(self, progress):
		"""Eagerly decompile every charstring, logging the index on failure."""
		# XXX only when doing ttdump -i?
		for i, charString in enumerate(self.CharStrings.values()):
			try:
				charString.decompile()
			except Exception:
				# Fix: the old code used a bare 'except:' (which also caught
				# KeyboardInterrupt/SystemExit) and re-raised via
				# 'raise typ(value)', wrapping the exception in a new instance
				# and losing the original traceback. Catch only Exception and
				# re-raise as-is.
				log.error("Error in charstring %s", i)
				raise
			if not i % 30 and progress:
				progress.increment(0)  # update
|
2002-05-16 18:17:32 +00:00
|
|
|
|
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
class FontDict(BaseDict):

	"""A font dict, as used in a CID font's FDArray (compiled via
	FontDictCompiler). Reuses the TopDict operator tables; decompilerClass
	is None, so BaseDict.decompile cannot be used on this class.
	"""

	defaults = buildDefaults(topDictOperators)
	converters = buildConverters(topDictOperators)
	order = buildOrder(topDictOperators)
	decompilerClass = None
	compilerClass = FontDictCompiler

	def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None):
		BaseDict.__init__(self, strings, file, offset)
		self.GlobalSubrs = GlobalSubrs

	def getGlyphOrder(self):
		# The charset doubles as the glyph order, as in TopDict.
		return self.charset

	def toXML(self, xmlWriter, progress):
		# Suppress the Encoding entry when dumping to XML.
		self.skipNames = ['Encoding']
		BaseDict.toXML(self, xmlWriter, progress)
|
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class PrivateDict(BaseDict):

	"""A CFF Private dict. All behavior lives in BaseDict, parameterized
	here by the privateDictOperators table."""

	defaults = buildDefaults(privateDictOperators)
	converters = buildConverters(privateDictOperators)
	order = buildOrder(privateDictOperators)
	decompilerClass = PrivateDictDecompiler
	compilerClass = PrivateDictCompiler
|
2002-05-16 18:17:32 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class IndexedStrings(object):

	"""SID -> string mapping.

	SIDs below cffStandardStringCount index the shared standard-strings
	table; higher SIDs index this object's own string list.
	"""

	def __init__(self, file=None):
		if file is None:
			self.strings = []
		else:
			self.strings = [tostr(s, encoding="latin1") for s in Index(file)]

	def getCompiler(self):
		return IndexedStringsCompiler(self, None, None)

	def __len__(self):
		return len(self.strings)

	def __getitem__(self, SID):
		if SID < cffStandardStringCount:
			return cffStandardStrings[SID]
		return self.strings[SID - cffStandardStringCount]

	def getSID(self, s):
		"""Return the SID for string 's', appending it as a new string
		if it is neither a standard string nor already present."""
		if not hasattr(self, "stringMapping"):
			self.buildStringMapping()
		# Normalize to str so bytes/str spellings of the same string share
		# one SID (py2/py3 round-trip safety).
		s = tostr(s, encoding="latin1")
		sid = cffStandardStringMapping.get(s)
		if sid is not None:
			return sid
		sid = self.stringMapping.get(s)
		if sid is not None:
			return sid
		sid = len(self.strings) + cffStandardStringCount
		self.strings.append(s)
		self.stringMapping[s] = sid
		return sid

	def getStrings(self):
		return self.strings

	def buildStringMapping(self):
		# Lazily-built reverse index: string -> SID.
		self.stringMapping = {}
		for sid, name in enumerate(self.strings, start=cffStandardStringCount):
			self.stringMapping[name] = sid
|
|
|
|
|
|
|
|
|
1999-12-16 21:34:53 +00:00
|
|
|
# The 391 Standard Strings as used in the CFF format.
|
|
|
|
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
|
|
|
|
|
2015-04-26 02:01:01 -04:00
|
|
|
cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
|
|
|
|
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
|
|
|
|
'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
|
|
|
|
'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
|
|
|
|
'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
|
|
|
|
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
|
|
|
|
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
|
|
|
|
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
|
|
|
|
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
|
|
|
|
's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
|
|
|
|
'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
|
|
|
|
'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
|
|
|
|
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
|
|
|
|
'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
|
|
|
|
'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
|
|
|
|
'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
|
|
|
|
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
|
|
|
|
'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
|
|
|
|
'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
|
|
|
|
'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
|
|
|
|
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
|
|
|
|
'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
|
|
|
|
'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
|
|
|
|
'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
|
|
|
|
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
|
|
|
|
'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
|
|
|
|
'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
|
|
|
|
'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
|
|
|
|
'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
|
|
|
|
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
|
|
|
|
'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
|
|
|
|
'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
|
|
|
|
'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
|
|
|
|
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
|
|
|
|
'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
|
|
|
|
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
|
|
|
|
'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
|
|
|
|
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
|
|
|
|
'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
|
|
|
|
'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
|
|
|
|
'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
|
|
|
|
'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
|
|
|
|
'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
|
|
|
|
'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
|
|
|
|
'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
|
|
|
|
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
|
|
|
|
'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
|
|
|
|
'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
|
|
|
|
'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
|
|
|
|
'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
|
|
|
|
'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
|
|
|
|
'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
|
|
|
|
'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
|
|
|
|
'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
|
|
|
|
'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
|
|
|
|
'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
|
|
|
|
'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
|
|
|
|
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
|
|
|
|
'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
|
|
|
|
'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
|
|
|
|
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
|
|
|
|
'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
|
1999-12-16 21:34:53 +00:00
|
|
|
'Semibold'
|
|
|
|
]
|
|
|
|
|
|
|
|
# SIDs below this value index cffStandardStrings; SIDs at or above it index
# a font's own string INDEX (see IndexedStrings.__getitem__).
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
	cffStandardStringMapping[cffStandardStrings[_i]] = _i
|
2006-10-21 13:41:18 +00:00
|
|
|
|
|
|
|
cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign",
|
|
|
|
"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright",
|
|
|
|
"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two",
|
|
|
|
"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon",
|
|
|
|
"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G",
|
|
|
|
"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
|
|
|
|
"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
|
|
|
|
"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
|
|
|
|
"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
|
|
|
|
"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent",
|
|
|
|
"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle",
|
|
|
|
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",
|
|
|
|
"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet",
|
|
|
|
"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
|
|
|
|
"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
|
|
|
|
"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
|
|
|
|
"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE",
|
|
|
|
"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls",
|
|
|
|
"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus",
|
|
|
|
"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn",
|
|
|
|
"threequarters", "twosuperior", "registered", "minus", "eth", "multiply",
|
|
|
|
"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
|
|
|
|
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave",
|
|
|
|
"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute",
|
|
|
|
"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute",
|
|
|
|
"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute",
|
|
|
|
"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute",
|
|
|
|
"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis",
|
|
|
|
"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde",
|
|
|
|
"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
|
|
|
|
"zcaron"]
|
|
|
|
|
|
|
|
# Size of the ISOAdobe charset string list above.
cffISOAdobeStringCount = 229
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
|
|
|
|
|
|
|
|
cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall",
|
|
|
|
"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall",
|
|
|
|
"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader",
|
|
|
|
"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
|
|
|
|
"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle",
|
|
|
|
"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon",
|
|
|
|
"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall",
|
|
|
|
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
|
|
|
|
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
|
|
|
|
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
|
|
|
|
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
|
|
|
|
"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall",
|
|
|
|
"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
|
|
|
|
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall",
|
|
|
|
"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall",
|
|
|
|
"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
|
|
|
|
"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
|
|
|
|
"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
|
|
|
|
"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth",
|
|
|
|
"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds",
|
|
|
|
"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior",
|
|
|
|
"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior",
|
|
|
|
"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior",
|
|
|
|
"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior",
|
|
|
|
"centinferior", "dollarinferior", "periodinferior", "commainferior",
|
|
|
|
"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall",
|
|
|
|
"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall",
|
|
|
|
"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall",
|
|
|
|
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
|
|
|
|
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
|
|
|
|
"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
|
|
|
|
"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall",
|
|
|
|
"Ydieresissmall"]
|
|
|
|
|
|
|
|
# Size of the Expert charset string list above.
# NOTE(review): the list itself is named 'cffIExpertStrings', which looks
# like a typo for 'cffExpertStrings'; renaming would change the module's
# public names, so it is left as-is.
cffExpertStringCount = 166
assert len(cffIExpertStrings) == cffExpertStringCount
|
|
|
|
|
|
|
|
cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle",
|
|
|
|
"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader",
|
|
|
|
"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle",
|
|
|
|
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
|
|
|
|
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon",
|
|
|
|
"semicolon", "commasuperior", "threequartersemdash", "periodsuperior",
|
|
|
|
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
|
|
|
|
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
|
|
|
|
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
|
|
|
|
"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah",
|
|
|
|
"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf",
|
|
|
|
"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths",
|
|
|
|
"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior",
|
|
|
|
"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior",
|
|
|
|
"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
|
|
|
|
"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior",
|
|
|
|
"eightinferior", "nineinferior", "centinferior", "dollarinferior",
|
|
|
|
"periodinferior", "commainferior"]
|
|
|
|
|
|
|
|
# Size of the ExpertSubset charset string list above.
cffExpertSubsetStringCount = 87
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
|