1999-12-16 21:34:53 +00:00
|
|
|
"""cffLib.py -- read/write tools for Adobe CFF fonts."""
|
|
|
|
|
2013-11-27 17:27:45 -05:00
|
|
|
from fontTools.misc.py23 import *
|
2013-09-17 16:59:39 -04:00
|
|
|
from fontTools.misc import sstruct
|
2000-01-16 22:14:02 +00:00
|
|
|
from fontTools.misc import psCharStrings
|
2017-05-19 16:55:10 +09:00
|
|
|
from fontTools.misc.arrayTools import unionRect, intRect
|
2002-05-24 09:58:04 +00:00
|
|
|
from fontTools.misc.textTools import safeEval
|
2017-01-12 15:23:12 -08:00
|
|
|
from fontTools.ttLib import TTFont
|
2017-06-21 10:32:58 +01:00
|
|
|
from fontTools.ttLib.tables.otBase import OTTableWriter
|
|
|
|
from fontTools.ttLib.tables.otBase import OTTableReader
|
2017-01-12 15:23:12 -08:00
|
|
|
from fontTools.ttLib.tables import otTables as ot
|
2013-11-27 17:27:45 -05:00
|
|
|
import struct
|
2016-01-24 14:50:57 +00:00
|
|
|
import logging
|
2017-01-12 15:23:12 -08:00
|
|
|
import re
|
2016-01-24 14:50:57 +00:00
|
|
|
|
|
|
|
# mute cffLib debug messages when running ttx in verbose mode
|
|
|
|
DEBUG = logging.DEBUG - 1
|
|
|
|
log = logging.getLogger(__name__)
|
2002-05-17 07:06:32 +00:00
|
|
|
|
1999-12-16 21:34:53 +00:00
|
|
|
cffHeaderFormat = """
|
|
|
|
major: B
|
|
|
|
minor: B
|
|
|
|
hdrSize: B
|
|
|
|
"""
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
maxStackLimit = 513
|
2017-05-16 19:32:27 -07:00
|
|
|
# maxstack operator has been deprecated. max stack is now always 513.
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CFFFontSet(object):
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
	def decompile(self, file, otFont, isCFF2=None):
		"""Parse a binary CFF or CFF2 table from the file object 'file'.

		If isCFF2 is None the format is detected from the 'major' field of
		the header; otherwise the header's 'major' must match the requested
		version, and a ValueError is raised if it does not.
		"""
		self.otFont = otFont
		# The first 3 header bytes are major/minor/hdrSize (see cffHeaderFormat).
		sstruct.unpack(cffHeaderFormat, file.read(3), self)
		if isCFF2 is not None:
			# called from ttLib: assert 'major' as read from file matches the
			# expected version
			expected_major = (2 if isCFF2 else 1)
			if self.major != expected_major:
				raise ValueError(
					"Invalid CFF 'major' version: expected %d, found %d" %
					(expected_major, self.major))
		else:
			# use 'major' version from file to determine if isCFF2
			assert self.major in (1, 2), "Unknown CFF format"
			isCFF2 = self.major == 2
		if not isCFF2:
			# CFF 1: the header is followed by a one-byte offSize, then the
			# Name INDEX, Top DICT INDEX and String INDEX.
			self.offSize = struct.unpack("B", file.read(1))[0]
			file.seek(self.hdrSize)
			self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
			self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
			self.strings = IndexedStrings(file)
		else:  # isCFF2
			# CFF2 has no Name or String INDEX; the 16-bit field after the
			# header gives the size of the TopDict data.
			self.topDictSize = struct.unpack(">H", file.read(2))[0]
			file.seek(self.hdrSize)
			self.fontNames = ["CFF2Font"]
			cff2GetGlyphOrder = otFont.getGlyphOrder
			# in CFF2, offsetSize is the size of the TopDict data.
			self.topDictIndex = TopDictIndex(
				file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2)
			self.strings = None
		self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
		self.topDictIndex.strings = self.strings
		self.topDictIndex.GlobalSubrs = self.GlobalSubrs
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def __len__(self):
|
|
|
|
return len(self.fontNames)
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def keys(self):
|
2003-08-25 07:37:25 +00:00
|
|
|
return list(self.fontNames)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-17 07:06:32 +00:00
|
|
|
	def values(self):
		"""Return the TopDictIndex holding the fonts' TopDicts.

		Note: unlike keys(), this returns the index object itself,
		not a copy.
		"""
		return self.topDictIndex
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-07-26 18:51:42 +01:00
|
|
|
def __getitem__(self, nameOrIndex):
|
|
|
|
""" Return TopDict instance identified by name (str) or index (int
|
|
|
|
or any object that implements `__index__`).
|
|
|
|
"""
|
|
|
|
if hasattr(nameOrIndex, "__index__"):
|
|
|
|
index = nameOrIndex.__index__()
|
|
|
|
elif isinstance(nameOrIndex, basestring):
|
|
|
|
name = nameOrIndex
|
|
|
|
try:
|
|
|
|
index = self.fontNames.index(name)
|
|
|
|
except ValueError:
|
|
|
|
raise KeyError(nameOrIndex)
|
|
|
|
else:
|
|
|
|
raise TypeError(nameOrIndex)
|
2002-05-16 18:38:03 +00:00
|
|
|
return self.topDictIndex[index]
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
	def compile(self, file, otFont, isCFF2=None):
		"""Write the font set to 'file' in binary CFF (major == 1) or
		CFF2 (major == 2) format.

		If isCFF2 is None, the output format is taken from self.major;
		otherwise self.major must match the requested version, and a
		ValueError is raised if it does not.
		"""
		self.otFont = otFont
		if isCFF2 is not None:
			# called from ttLib: assert 'major' value matches expected version
			expected_major = (2 if isCFF2 else 1)
			if self.major != expected_major:
				raise ValueError(
					"Invalid CFF 'major' version: expected %d, found %d" %
					(expected_major, self.major))
		else:
			# use current 'major' value to determine output format
			assert self.major in (1, 2), "Unknown CFF format"
			isCFF2 = self.major == 2

		# CFF2 TopDicts have no FontBBox, so only recalc for CFF 1.
		if otFont.recalcBBoxes and not isCFF2:
			for topDict in self.topDictIndex:
				topDict.recalcFontBBox()

		if not isCFF2:
			strings = IndexedStrings()
		else:
			# CFF2 has no String INDEX.
			strings = None
		writer = CFFWriter(isCFF2)
		topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
		if isCFF2:
			self.hdrSize = 5
			writer.add(sstruct.pack(cffHeaderFormat, self))
			# Note: topDictSize will most likely change in CFFWriter.toFile().
			self.topDictSize = topCompiler.getDataLength()
			writer.add(struct.pack(">H", self.topDictSize))
		else:
			self.hdrSize = 4
			self.offSize = 4  # will most likely change in CFFWriter.toFile().
			writer.add(sstruct.pack(cffHeaderFormat, self))
			writer.add(struct.pack("B", self.offSize))
		if not isCFF2:
			# Name INDEX exists only in CFF 1.
			fontNames = Index()
			for name in self.fontNames:
				fontNames.append(name)
			writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
		writer.add(topCompiler)
		if not isCFF2:
			writer.add(strings.getCompiler())
		writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))

		# Default any missing charset to the font's glyph order.
		for topDict in self.topDictIndex:
			if not hasattr(topDict, "charset") or topDict.charset is None:
				charset = otFont.getGlyphOrder()
				topDict.charset = charset
		# Children (charstrings, private dicts, subrs...) are written
		# after the top-level structures, in the order the compilers
		# report them.
		children = topCompiler.getChildren(strings)
		for child in children:
			writer.add(child)

		writer.toFile(file)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2018-01-25 17:30:23 -08:00
|
|
|
	def toXML(self, xmlWriter):
		"""Dump the font set (header fields, each font, and the global
		subroutines) to TTX/XML via 'xmlWriter'."""
		xmlWriter.simpletag("major", value=self.major)
		xmlWriter.newline()
		xmlWriter.simpletag("minor", value=self.minor)
		xmlWriter.newline()
		for fontName in self.fontNames:
			xmlWriter.begintag("CFFFont", name=tostr(fontName))
			xmlWriter.newline()
			# __getitem__ resolves the name to the matching TopDict.
			font = self[fontName]
			font.toXML(xmlWriter)
			xmlWriter.endtag("CFFFont")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.begintag("GlobalSubrs")
		xmlWriter.newline()
		self.GlobalSubrs.toXML(xmlWriter)
		xmlWriter.endtag("GlobalSubrs")
		xmlWriter.newline()
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
	def fromXML(self, name, attrs, content, otFont=None):
		"""Rebuild the font set from one TTX/XML element.

		Called once per top-level element ('major', 'minor', 'CFFFont',
		'GlobalSubrs'); accumulates state on self across calls.
		"""
		self.otFont = otFont

		# set defaults. These will be replaced if there are entries for them
		# in the XML file.
		if not hasattr(self, "major"):
			self.major = 1
		if not hasattr(self, "minor"):
			self.minor = 0

		if name == "CFFFont":
			if self.major == 1:
				if not hasattr(self, "offSize"):
					# this will be recalculated when the cff is compiled.
					self.offSize = 4
				if not hasattr(self, "hdrSize"):
					self.hdrSize = 4
				if not hasattr(self, "GlobalSubrs"):
					self.GlobalSubrs = GlobalSubrsIndex()
				if not hasattr(self, "fontNames"):
					self.fontNames = []
					self.topDictIndex = TopDictIndex()
				fontName = attrs["name"]
				self.fontNames.append(fontName)
				topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
				topDict.charset = None  # gets filled in later
			elif self.major == 2:
				if not hasattr(self, "hdrSize"):
					self.hdrSize = 5
				if not hasattr(self, "GlobalSubrs"):
					self.GlobalSubrs = GlobalSubrsIndex()
				if not hasattr(self, "fontNames"):
					self.fontNames = ["CFF2Font"]
				cff2GetGlyphOrder = self.otFont.getGlyphOrder
				topDict = TopDict(
					GlobalSubrs=self.GlobalSubrs,
					cff2GetGlyphOrder=cff2GetGlyphOrder)
				self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None)
			self.topDictIndex.append(topDict)
			for element in content:
				# Skip whitespace/text nodes between child elements.
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				topDict.fromXML(name, attrs, content)

			# Propagate the TopDict's VarStore to the FontDicts' private
			# dicts when it has not been hooked up yet (CFF2 variable fonts).
			if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
				fdArray = topDict.FDArray
				for fontDict in fdArray:
					if hasattr(fontDict, "Private"):
						fontDict.Private.vstore = topDict.VarStore

		elif name == "GlobalSubrs":
			subrCharStringClass = psCharStrings.T2CharString
			if not hasattr(self, "GlobalSubrs"):
				self.GlobalSubrs = GlobalSubrsIndex()
			for element in content:
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				subr = subrCharStringClass()
				subr.fromXML(name, attrs, content)
				self.GlobalSubrs.append(subr)
		elif name == "major":
			self.major = int(attrs['value'])
		elif name == "minor":
			self.minor = int(attrs['value'])
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
def convertCFFToCFF2(self, otFont):
|
|
|
|
# This assumes a decompiled CFF table.
|
|
|
|
self.major = 2
|
|
|
|
cff2GetGlyphOrder = self.otFont.getGlyphOrder
|
2017-07-19 18:18:58 +01:00
|
|
|
topDictData = TopDictIndex(None, cff2GetGlyphOrder, None)
|
2017-01-12 15:23:12 -08:00
|
|
|
topDictData.items = self.topDictIndex.items
|
|
|
|
self.topDictIndex = topDictData
|
2017-06-21 10:32:58 +01:00
|
|
|
topDict = topDictData[0]
|
2017-01-12 15:23:12 -08:00
|
|
|
if hasattr(topDict, 'Private'):
|
|
|
|
privateDict = topDict.Private
|
|
|
|
else:
|
|
|
|
privateDict = None
|
|
|
|
opOrder = buildOrder(topDictOperators2)
|
|
|
|
topDict.order = opOrder
|
|
|
|
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
|
|
|
|
for entry in topDictOperators:
|
|
|
|
key = entry[1]
|
2017-06-21 10:32:58 +01:00
|
|
|
if key not in opOrder:
|
2017-01-12 15:23:12 -08:00
|
|
|
if key in topDict.rawDict:
|
|
|
|
del topDict.rawDict[key]
|
|
|
|
if hasattr(topDict, key):
|
2018-03-06 21:39:21 -08:00
|
|
|
delattr(topDict, key)
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
if not hasattr(topDict, "FDArray"):
|
2017-07-19 18:18:58 +01:00
|
|
|
fdArray = topDict.FDArray = FDArrayIndex()
|
2017-01-12 15:23:12 -08:00
|
|
|
fdArray.strings = None
|
|
|
|
fdArray.GlobalSubrs = topDict.GlobalSubrs
|
|
|
|
topDict.GlobalSubrs.fdArray = fdArray
|
|
|
|
charStrings = topDict.CharStrings
|
|
|
|
if charStrings.charStringsAreIndexed:
|
|
|
|
charStrings.charStringsIndex.fdArray = fdArray
|
|
|
|
else:
|
|
|
|
charStrings.fdArray = fdArray
|
2018-03-07 14:19:49 -08:00
|
|
|
fontDict = FontDict()
|
|
|
|
fontDict.setCFF2(True)
|
2017-01-12 15:23:12 -08:00
|
|
|
fdArray.append(fontDict)
|
|
|
|
fontDict.Private = privateDict
|
|
|
|
privateOpOrder = buildOrder(privateDictOperators2)
|
|
|
|
for entry in privateDictOperators:
|
|
|
|
key = entry[1]
|
2017-06-21 10:32:58 +01:00
|
|
|
if key not in privateOpOrder:
|
2017-01-12 15:23:12 -08:00
|
|
|
if key in privateDict.rawDict:
|
2017-06-21 10:32:58 +01:00
|
|
|
# print "Removing private dict", key
|
2017-01-12 15:23:12 -08:00
|
|
|
del privateDict.rawDict[key]
|
|
|
|
if hasattr(privateDict, key):
|
2018-03-06 21:39:21 -08:00
|
|
|
delattr(privateDict, key)
|
2017-06-21 10:32:58 +01:00
|
|
|
# print "Removing privateDict attr", key
|
2017-01-12 15:23:12 -08:00
|
|
|
else:
|
|
|
|
# clean up the PrivateDicts in the fdArray
|
2018-03-06 17:34:33 -08:00
|
|
|
fdArray = topDict.FDArray
|
2017-01-12 15:23:12 -08:00
|
|
|
privateOpOrder = buildOrder(privateDictOperators2)
|
|
|
|
for fontDict in fdArray:
|
2018-03-07 14:19:49 -08:00
|
|
|
fontDict.setCFF2(True)
|
2018-03-06 17:34:33 -08:00
|
|
|
for key in fontDict.rawDict.keys():
|
|
|
|
if key not in fontDict.order:
|
|
|
|
del fontDict.rawDict[key]
|
|
|
|
if hasattr(fontDict, key):
|
2018-03-06 21:39:21 -08:00
|
|
|
delattr(fontDict, key)
|
2018-03-06 17:34:33 -08:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
privateDict = fontDict.Private
|
|
|
|
for entry in privateDictOperators:
|
|
|
|
key = entry[1]
|
2017-06-21 10:32:58 +01:00
|
|
|
if key not in privateOpOrder:
|
2017-01-12 15:23:12 -08:00
|
|
|
if key in privateDict.rawDict:
|
2017-06-21 10:32:58 +01:00
|
|
|
# print "Removing private dict", key
|
2017-01-12 15:23:12 -08:00
|
|
|
del privateDict.rawDict[key]
|
|
|
|
if hasattr(privateDict, key):
|
2018-03-06 21:39:21 -08:00
|
|
|
delattr(privateDict, key)
|
2017-06-21 10:32:58 +01:00
|
|
|
# print "Removing privateDict attr", key
|
2017-01-12 15:23:12 -08:00
|
|
|
# At this point, the Subrs and Charstrings are all still T2Charstring class
|
|
|
|
# easiest to fix this by compiling, then decompiling again
|
|
|
|
file = BytesIO()
|
2017-07-19 18:18:58 +01:00
|
|
|
self.compile(file, otFont, isCFF2=True)
|
2017-01-12 15:23:12 -08:00
|
|
|
file.seek(0)
|
2017-07-19 18:18:58 +01:00
|
|
|
self.decompile(file, otFont, isCFF2=True)
|
1999-12-16 21:34:53 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CFFWriter(object):
	"""Accumulates compiled CFF structures and writes them out, iterating
	the layout until all offsets stabilize (offsets depend on offset sizes,
	which depend on offsets)."""

	def __init__(self, isCFF2):
		# Ordered list of raw bytes objects and compiler objects.
		self.data = []
		self.isCFF2 = isCFF2

	def add(self, table):
		# 'table' is either a bytes string or an object with
		# getDataLength()/toFile() (and optionally setPos()).
		self.data.append(table)

	def toFile(self, file):
		"""Write all accumulated data to 'file'.

		First runs a fixed-point loop computing item positions (setPos()
		feeds positions back into the DICT compilers, which can change
		their encoded size), then serializes once positions no longer move.
		"""
		lastPosList = None
		count = 1
		while True:
			log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
			count = count + 1
			pos = 0
			posList = [pos]
			for item in self.data:
				if hasattr(item, "getDataLength"):
					endPos = pos + item.getDataLength()
					if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
						# CFF2 header needs the final TopDict size.
						self.topDictSize = item.getDataLength()
				else:
					endPos = pos + len(item)
				if hasattr(item, "setPos"):
					item.setPos(pos, endPos)
				pos = endPos
				posList.append(pos)
			if posList == lastPosList:
				# Layout converged.
				break
			lastPosList = posList
		log.log(DEBUG, "CFFWriter.toFile() writing to file.")
		begin = file.tell()
		if self.isCFF2:
			# Patch the topDictSize field (data[1]) with the final value.
			self.data[1] = struct.pack(">H", self.topDictSize)
		else:
			# Patch the header's offSize field (data[1]) with the final value.
			self.offSize = calcOffSize(lastPosList[-1])
			self.data[1] = struct.pack("B", self.offSize)
		posList = [0]
		for item in self.data:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)
			posList.append(file.tell() - begin)
		# Sanity check: what was written matches the converged layout.
		assert posList == lastPosList
|
|
|
|
|
|
|
|
|
|
|
|
def calcOffSize(largestOffset):
	"""Return the number of bytes (1-4) needed to encode 'largestOffset'
	as a CFF offset."""
	for size, limit in enumerate((0x100, 0x10000, 0x1000000), 1):
		if largestOffset < limit:
			return size
	# Anything 0x1000000 or larger takes the maximum of 4 bytes.
	return 4
|
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class IndexCompiler(object):
	"""Base class for writing a CFF INDEX: a count field, an offset array,
	and the concatenated object data."""

	def __init__(self, items, strings, parent, isCFF2=None):
		# Inherit the CFF2 flag from the parent compiler when not given
		# explicitly.
		if isCFF2 is None and hasattr(parent, "isCFF2"):
			isCFF2 = parent.isCFF2
		assert isCFF2 is not None
		self.isCFF2 = isCFF2
		self.items = self.getItems(items, strings)
		self.parent = parent

	def getItems(self, items, strings):
		# Subclasses override this to turn high-level items into
		# compilable objects or raw bytes.
		return items

	def getOffsets(self):
		"""Return the INDEX offset array; per the CFF spec, offsets are
		1-based relative to the byte preceding the object data."""
		# An empty INDEX contains only the count field.
		if self.items:
			pos = 1
			offsets = [pos]
			for item in self.items:
				if hasattr(item, "getDataLength"):
					pos = pos + item.getDataLength()
				else:
					pos = pos + len(item)
				offsets.append(pos)
		else:
			offsets = []
		return offsets

	def getDataLength(self):
		"""Return the total byte length of the compiled INDEX."""
		# CFF2 uses a 32-bit count; CFF 1 uses 16 bits.
		if self.isCFF2:
			countSize = 4
		else:
			countSize = 2

		if self.items:
			lastOffset = self.getOffsets()[-1]
			offSize = calcOffSize(lastOffset)
			dataLength = (
				countSize +  # count
				1 +  # offSize
				(len(self.items) + 1) * offSize +  # the offsets
				lastOffset - 1  # size of object data
			)
		else:
			# count. For empty INDEX tables, this is the only entry.
			dataLength = countSize

		return dataLength

	def toFile(self, file):
		"""Serialize the INDEX to 'file'."""
		offsets = self.getOffsets()
		if self.isCFF2:
			writeCard32(file, len(self.items))
		else:
			writeCard16(file, len(self.items))
		# An empty INDEX contains only the count field.
		if self.items:
			offSize = calcOffSize(offsets[-1])
			writeCard8(file, offSize)
			offSize = -offSize
			pack = struct.pack
			for offset in offsets:
				# Keep only the low 'offSize' bytes of the big-endian value.
				binOffset = pack(">l", offset)[offSize:]
				assert len(binOffset) == -offSize
				file.write(binOffset)
			for item in self.items:
				if hasattr(item, "toFile"):
					item.toFile(file)
				else:
					data = tobytes(item, encoding="latin1")
					file.write(data)
|
2002-05-23 21:50:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
class IndexedStringsCompiler(IndexCompiler):
	"""Compiler for the String INDEX."""

	def getItems(self, items, strings):
		# 'items' is an IndexedStrings object; its .strings list holds
		# the raw strings to write.
		return items.strings
|
|
|
|
|
|
|
|
|
|
|
|
class TopDictIndexCompiler(IndexCompiler):
	"""Compiler for the Top DICT INDEX.

	In CFF2 there is no INDEX wrapper: the single TopDict's data is
	written directly, so the INDEX-related methods are bypassed.
	"""

	def getItems(self, items, strings):
		# Wrap each TopDict in its own compiler.
		out = []
		for item in items:
			out.append(item.getCompiler(strings, self))
		return out

	def getChildren(self, strings):
		# Collect the dependent structures (charstrings, private dicts,
		# subrs, ...) of all TopDicts, in order.
		children = []
		for topDict in self.items:
			children.extend(topDict.getChildren(strings))
		return children

	def getOffsets(self):
		if self.isCFF2:
			# CFF2: exactly one TopDict, no INDEX structure around it.
			offsets = [0, self.items[0].getDataLength()]
			return offsets
		else:
			return super(TopDictIndexCompiler, self).getOffsets()

	def getDataLength(self):
		if self.isCFF2:
			# CFF2: just the raw TopDict data, no count/offset overhead.
			dataLength = self.items[0].getDataLength()
			return dataLength
		else:
			return super(TopDictIndexCompiler, self).getDataLength()

	def toFile(self, file):
		if self.isCFF2:
			# CFF2: write the single TopDict's data directly.
			self.items[0].toFile(file)
		else:
			super(TopDictIndexCompiler, self).toFile(file)
|
2017-01-12 15:23:12 -08:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
class FDArrayIndexCompiler(IndexCompiler):
	"""Compiler for the FDArray INDEX of CID-keyed / CFF2 fonts."""

	def getItems(self, items, strings):
		# Wrap each FontDict in its own compiler.
		out = []
		for item in items:
			out.append(item.getCompiler(strings, self))
		return out

	def getChildren(self, strings):
		# Collect the dependent structures (private dicts, subrs, ...)
		# of all FontDicts, in order.
		children = []
		for fontDict in self.items:
			children.extend(fontDict.getChildren(strings))
		return children

	def toFile(self, file):
		"""Serialize the FDArray INDEX to 'file'."""
		offsets = self.getOffsets()
		if self.isCFF2:
			writeCard32(file, len(self.items))
		else:
			writeCard16(file, len(self.items))
		# NOTE(review): unlike IndexCompiler.toFile(), the offset array is
		# written unconditionally; this relies on the FDArray never being
		# empty (offsets[-1] would raise IndexError otherwise).
		offSize = calcOffSize(offsets[-1])
		writeCard8(file, offSize)
		offSize = -offSize
		pack = struct.pack
		for offset in offsets:
			# Keep only the low 'offSize' bytes of the big-endian value.
			binOffset = pack(">l", offset)[offSize:]
			assert len(binOffset) == -offSize
			file.write(binOffset)
		for item in self.items:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)

	def setPos(self, pos, endPos):
		# Record the absolute FDArray position in the owning TopDict.
		self.parent.rawDict["FDArray"] = pos
|
|
|
|
|
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class GlobalSubrsCompiler(IndexCompiler):
	"""Compiler for the Global Subrs INDEX."""

	def getItems(self, items, strings):
		# Compile each charstring to bytecode up front; the INDEX then
		# only deals with raw bytes.
		out = []
		for cs in items:
			cs.compile(self.isCFF2)
			out.append(cs.bytecode)
		return out
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class SubrsCompiler(GlobalSubrsCompiler):
	"""Compiler for a local Subrs INDEX."""

	def setPos(self, pos, endPos):
		# The Subrs operand is stored relative to the position of the
		# owning Private DICT, not as an absolute offset.
		offset = pos - self.parent.pos
		self.parent.rawDict["Subrs"] = offset
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class CharStringsCompiler(GlobalSubrsCompiler):
	"""Compiler for the CharStrings INDEX."""

	def getItems(self, items, strings):
		# Compile each glyph charstring to bytecode up front.
		out = []
		for cs in items:
			cs.compile(self.isCFF2)
			out.append(cs.bytecode)
		return out

	def setPos(self, pos, endPos):
		# The CharStrings offset in the TopDict is absolute.
		self.parent.rawDict["CharStrings"] = pos
|
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class Index(object):

	"""This class represents what the CFF spec calls an INDEX.

	Items are read lazily: __init__ only parses the offset array, and
	__getitem__ reads and decodes individual items on demand.
	"""

	compilerClass = IndexCompiler

	def __init__(self, file=None, isCFF2=None):
		# Either both arguments are given (read from file) or neither
		# (build an empty, in-memory INDEX).
		assert (isCFF2 is None) == (file is None)
		self.items = []
		name = self.__class__.__name__
		if file is None:
			return
		self._isCFF2 = isCFF2
		log.log(DEBUG, "loading %s at %s", name, file.tell())
		self.file = file
		# CFF2 uses a 32-bit count; CFF 1 uses 16 bits.
		if isCFF2:
			count = readCard32(file)
		else:
			count = readCard16(file)
		if count == 0:
			return
		# Placeholders; filled in lazily by __getitem__.
		self.items = [None] * count
		offSize = readCard8(file)
		log.log(DEBUG, "    index count: %s offSize: %s", count, offSize)
		assert offSize <= 4, "offSize too large: %s" % offSize
		self.offsets = offsets = []
		# Left-pad each offset to 4 bytes so it unpacks as a uint32.
		pad = b'\0' * (4 - offSize)
		for index in range(count + 1):
			chunk = file.read(offSize)
			chunk = pad + chunk
			offset, = struct.unpack(">L", chunk)
			offsets.append(int(offset))
		# Offsets are 1-based relative to the byte before the object data.
		self.offsetBase = file.tell() - 1
		file.seek(self.offsetBase + offsets[-1])  # pretend we've read the whole lot
		log.log(DEBUG, "    end of %s at %s", name, file.tell())

	def __len__(self):
		return len(self.items)

	def __getitem__(self, index):
		item = self.items[index]
		if item is not None:
			# Already read (or assigned) — return the cached object.
			return item
		# Lazily read the raw data for this item from the file.
		offset = self.offsets[index] + self.offsetBase
		size = self.offsets[index + 1] - self.offsets[index]
		file = self.file
		file.seek(offset)
		data = file.read(size)
		assert len(data) == size
		item = self.produceItem(index, data, file, offset)
		self.items[index] = item  # cache for subsequent accesses
		return item

	def __setitem__(self, index, item):
		self.items[index] = item

	def produceItem(self, index, data, file, offset):
		# Subclasses override this to build higher-level objects from
		# the raw bytes.
		return data

	def append(self, item):
		self.items.append(item)

	def getCompiler(self, strings, parent, isCFF2=None):
		return self.compilerClass(self, strings, parent, isCFF2=isCFF2)

	def clear(self):
		# Empty the items list in place.
		del self.items[:]
|
|
|
|
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class GlobalSubrsIndex(Index):
	"""INDEX of global subroutine charstrings."""

	compilerClass = GlobalSubrsCompiler
	# Classes used to instantiate items read from the INDEX.
	subrClass = psCharStrings.T2CharString
	charStringClass = psCharStrings.T2CharString

	def __init__(self, file=None, globalSubrs=None, private=None,
			fdSelect=None, fdArray=None, isCFF2=None):
		super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
		self.globalSubrs = globalSubrs
		self.private = private
		# fdSelect/fdArray are only set when provided (truthy), so
		# consumers test with hasattr().
		if fdSelect:
			self.fdSelect = fdSelect
		if fdArray:
			self.fdArray = fdArray

	def produceItem(self, index, data, file, offset):
		"""Build a charstring from raw bytes, resolving which Private
		DICT applies to it."""
		if self.private is not None:
			private = self.private
		elif hasattr(self, 'fdArray') and self.fdArray is not None:
			# CID-keyed: pick the FontDict via FDSelect, defaulting to 0.
			if hasattr(self, 'fdSelect') and self.fdSelect is not None:
				fdIndex = self.fdSelect[index]
			else:
				fdIndex = 0
			private = self.fdArray[fdIndex].Private
		else:
			private = None
		return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)

	def toXML(self, xmlWriter):
		"""Dump all charstrings to TTX/XML."""
		xmlWriter.comment(
			"The 'index' attribute is only for humans; "
			"it is ignored when parsed.")
		xmlWriter.newline()
		for i in range(len(self)):
			subr = self[i]
			if subr.needsDecompilation():
				# Still raw bytecode; mark so it round-trips as-is.
				xmlWriter.begintag("CharString", index=i, raw=1)
			else:
				xmlWriter.begintag("CharString", index=i)
			xmlWriter.newline()
			subr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()

	def fromXML(self, name, attrs, content):
		"""Append one charstring parsed from a TTX/XML element."""
		if name != "CharString":
			return
		subr = self.subrClass()
		subr.fromXML(name, attrs, content)
		self.append(subr)

	def getItemAndSelector(self, index):
		"""Return (charstring, fdSelect entry or None) for 'index'."""
		sel = None
		if hasattr(self, 'fdSelect'):
			sel = self.fdSelect[index]
		return self[index], sel
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class SubrsIndex(GlobalSubrsIndex):
	"""Index of local subroutines.

	Behaves exactly like the global subr index; only the compiler class
	differs.
	"""
	compilerClass = SubrsCompiler
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-17 07:06:32 +00:00
|
|
|
class TopDictIndex(Index):
	"""Index holding the font's TopDict(s).

	For CFF2 there is no real INDEX structure in the file: there is a
	single TopDict of a known size (``topSize``), so this class fakes a
	one-entry index by recording the current file position and seeking
	past the dict data.  For CFF (and when reading from XML) it behaves
	like a regular Index.
	"""

	compilerClass = TopDictIndexCompiler

	def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0,
			isCFF2=None):
		# isCFF2 must be supplied exactly when reading from a file.
		assert (isCFF2 is None) == (file is None)
		self.cff2GetGlyphOrder = cff2GetGlyphOrder
		if file is not None and isCFF2:
			self._isCFF2 = isCFF2
			self.items = []
			name = self.__class__.__name__
			log.log(DEBUG, "loading %s at %s", name, file.tell())
			self.file = file
			# CFF2 always has exactly one TopDict.
			count = 1
			self.items = [None] * count
			# Synthesize the offsets a real INDEX would have provided.
			self.offsets = [0, topSize]
			self.offsetBase = file.tell()
			# pretend we've read the whole lot
			file.seek(self.offsetBase + topSize)
			log.log(DEBUG, " end of %s at %s", name, file.tell())
		else:
			# CFF, or constructing from XML: regular INDEX handling.
			super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)

	def produceItem(self, index, data, file, offset):
		"""Lazily build and decompile the TopDict for *index*."""
		top = TopDict(
			self.strings, file, offset, self.GlobalSubrs,
			self.cff2GetGlyphOrder, isCFF2=self._isCFF2)
		top.decompile(data)
		return top

	def toXML(self, xmlWriter):
		"""Write each TopDict as a <FontDict index=...> element."""
		for i in range(len(self)):
			xmlWriter.begintag("FontDict", index=i)
			xmlWriter.newline()
			self[i].toXML(xmlWriter)
			xmlWriter.endtag("FontDict")
			xmlWriter.newline()
|
2002-05-17 07:06:32 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-05-10 10:37:55 -07:00
|
|
|
class FDArrayIndex(Index):
	"""Index of FontDicts (the FDArray of a CID-keyed or CFF2 font)."""

	compilerClass = FDArrayIndexCompiler

	def toXML(self, xmlWriter):
		"""Write each FontDict as a <FontDict index=...> element."""
		for i in range(len(self)):
			xmlWriter.begintag("FontDict", index=i)
			xmlWriter.newline()
			self[i].toXML(xmlWriter)
			xmlWriter.endtag("FontDict")
			xmlWriter.newline()

	def produceItem(self, index, data, file, offset):
		"""Lazily build and decompile the FontDict for *index*."""
		fontDict = FontDict(
			self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2,
			vstore=self.vstore)
		fontDict.decompile(data)
		return fontDict

	def fromXML(self, name, attrs, content):
		"""Parse one <FontDict> element and append it to the array."""
		if name != "FontDict":
			return
		fontDict = FontDict()
		for element in content:
			# skip whitespace/text nodes between child elements
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			fontDict.fromXML(name, attrs, content)
		self.append(fontDict)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
|
|
|
class VarStoreData(object):
	"""Wrapper around the CFF2 VariationStore data block.

	Holds the raw bytes (``data``) and the decompiled
	``otTables.VarStore`` object, converting between the two with the
	otBase reader/writer machinery.
	"""

	def __init__(self, file=None, otVarStore=None):
		self.file = file
		self.data = None
		self.otVarStore = otVarStore
		self.font = TTFont()  # dummy font for the decompile function.

	def decompile(self):
		"""Read and decompile the VarStore from ``self.file``.

		The file position must be at the leading Card16 length field.
		Returns self for chaining.
		"""
		if self.file:
			# read data in from file. Assume position is correct.
			length = readCard16(self.file)
			self.data = self.file.read(length)
			# NOTE: the original code built an unused GlobalState object
			# here and immediately shadowed it with {}; the dead code has
			# been removed — OTTableReader only receives the empty dict.
			reader = OTTableReader(self.data, {})
			self.otVarStore = ot.VarStore()
			self.otVarStore.decompile(reader, self.font)
		return self

	def compile(self):
		"""Serialize ``self.otVarStore`` into ``self.data``."""
		writer = OTTableWriter()
		self.otVarStore.compile(writer, self.font)
		# Note that this omits the initial Card16 length from the CFF2
		# VarStore data block
		self.data = writer.getAllData()

	def writeXML(self, xmlWriter, name):
		"""Dump the VarStore as XML via the otTables machinery."""
		self.otVarStore.toXML(xmlWriter, self.font)

	def xmlRead(self, name, attrs, content, parent):
		"""Rebuild the VarStore from parsed XML content."""
		self.otVarStore = ot.VarStore()
		for element in content:
			if isinstance(element, tuple):
				name, attrs, content = element
				self.otVarStore.fromXML(name, attrs, content, self.font)
		return None

	def __len__(self):
		return len(self.data)

	def getNumRegions(self, vsIndex):
		"""Return the number of variation regions used by VarData *vsIndex*."""
		varData = self.otVarStore.VarData[vsIndex]
		return varData.VarRegionCount
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
class FDSelect(object):
	"""Maps each glyph ID to an index into the FDArray.

	Supports FDSelect format 0 (one byte per glyph), format 3 (ranges
	with Card16 glyph IDs), and the CFF2-only format 4 (ranges with
	Card32 glyph IDs and Card16 FD indices).
	"""

	def __init__(self, file=None, numGlyphs=None, format=None):
		if file:
			# read data in from file
			self.format = readCard8(file)
			if self.format == 0:
				# format 0: a plain byte array, one FD index per glyph.
				from array import array
				self.gidArray = array("B", file.read(numGlyphs)).tolist()
			elif self.format == 3:
				# format 3: nRanges of (first glyph, fd) pairs followed by
				# a sentinel glyph ID; each range's fd applies to
				# [first, nextFirst).
				gidArray = [None] * numGlyphs
				nRanges = readCard16(file)
				fd = None
				prev = None
				for i in range(nRanges):
					first = readCard16(file)
					if prev is not None:
						# fill the previous range now that we know its end
						for glyphID in range(prev, first):
							gidArray[glyphID] = fd
					prev = first
					fd = readCard8(file)
				if prev is not None:
					# sentinel: one glyph ID past the last range
					first = readCard16(file)
					for glyphID in range(prev, first):
						gidArray[glyphID] = fd
				self.gidArray = gidArray
			elif self.format == 4:
				# format 4: like format 3 but with Card32 glyph IDs and
				# Card16 FD indices (CFF2 only).
				gidArray = [None] * numGlyphs
				nRanges = readCard32(file)
				fd = None
				prev = None
				for i in range(nRanges):
					first = readCard32(file)
					if prev is not None:
						for glyphID in range(prev, first):
							gidArray[glyphID] = fd
					prev = first
					fd = readCard16(file)
				if prev is not None:
					first = readCard32(file)
					for glyphID in range(prev, first):
						gidArray[glyphID] = fd
				self.gidArray = gidArray
			else:
				assert False, "unsupported FDSelect format: %s" % format
		else:
			# reading from XML. Make empty gidArray, and leave format as passed in.
			# format is None will result in the smallest representation being used.
			self.format = format
			self.gidArray = []

	def __len__(self):
		return len(self.gidArray)

	def __getitem__(self, index):
		return self.gidArray[index]

	def __setitem__(self, index, fdSelectValue):
		self.gidArray[index] = fdSelectValue

	def append(self, fdSelectValue):
		self.gidArray.append(fdSelectValue)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CharStrings(object):
	"""Dict-like mapping of glyph name -> charstring.

	Operates in one of two modes:

	- *indexed* (read from a binary font): ``charStrings`` maps glyph
	  name -> integer index into ``charStringsIndex``;
	- *direct* (read from TTX): ``charStrings`` maps glyph name ->
	  the charstring object itself.
	"""

	def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray,
			isCFF2=None):
		self.globalSubrs = globalSubrs
		if file is not None:
			self.charStringsIndex = SubrsIndex(
				file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
			self.charStrings = charStrings = {}
			for i in range(len(charset)):
				charStrings[charset[i]] = i
			# read from OTF file: charStrings.values() are indices into
			# charStringsIndex.
			self.charStringsAreIndexed = 1
		else:
			self.charStrings = {}
			# read from ttx file: charStrings.values() are actual charstrings
			self.charStringsAreIndexed = 0
		self.private = private
		# fdSelect/fdArray are only set when present (CID-keyed fonts);
		# other code probes for them with hasattr().
		if fdSelect is not None:
			self.fdSelect = fdSelect
		if fdArray is not None:
			self.fdArray = fdArray

	def keys(self):
		return list(self.charStrings.keys())

	def values(self):
		if self.charStringsAreIndexed:
			return self.charStringsIndex
		else:
			return list(self.charStrings.values())

	def has_key(self, name):
		return name in self.charStrings

	__contains__ = has_key

	def __len__(self):
		return len(self.charStrings)

	def __getitem__(self, name):
		charString = self.charStrings[name]
		if self.charStringsAreIndexed:
			# indirect through the index in indexed mode
			charString = self.charStringsIndex[charString]
		return charString

	def __setitem__(self, name, charString):
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			self.charStringsIndex[index] = charString
		else:
			self.charStrings[name] = charString

	def getItemAndSelector(self, name):
		"""Return (charstring, FD selector index) for glyph *name*.

		The selector is None for non-CID fonts.
		"""
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			return self.charStringsIndex.getItemAndSelector(index)
		else:
			if hasattr(self, 'fdArray'):
				if hasattr(self, 'fdSelect'):
					sel = self.charStrings[name].fdSelectIndex
				else:
					# CID font without FDSelect: everything uses FD 0
					sel = 0
			else:
				sel = None
			return self.charStrings[name], sel

	def toXML(self, xmlWriter):
		"""Write all charstrings as <CharString> elements, sorted by name."""
		names = sorted(self.keys())
		for name in names:
			charStr, fdSelectIndex = self.getItemAndSelector(name)
			if charStr.needsDecompilation():
				# still raw bytes: mark it so it round-trips as hex
				raw = [("raw", 1)]
			else:
				raw = []
			if fdSelectIndex is None:
				xmlWriter.begintag("CharString", [('name', name)] + raw)
			else:
				xmlWriter.begintag(
					"CharString",
					[('name', name), ('fdSelectIndex', fdSelectIndex)] + raw)
			xmlWriter.newline()
			charStr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()

	def fromXML(self, name, attrs, content):
		"""Parse <CharString> child elements and store the charstrings."""
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			if name != "CharString":
				continue
			fdID = -1
			if hasattr(self, "fdArray"):
				# CID font: the Private dict comes from the selected FontDict
				try:
					fdID = safeEval(attrs["fdSelectIndex"])
				except KeyError:
					fdID = 0
				private = self.fdArray[fdID].Private
			else:
				private = self.private

			glyphName = attrs["name"]
			charStringClass = psCharStrings.T2CharString
			charString = charStringClass(
				private=private,
				globalSubrs=self.globalSubrs)
			charString.fromXML(name, attrs, content)
			if fdID >= 0:
				charString.fdSelectIndex = fdID
			self[glyphName] = charString
|
2002-05-15 07:41:30 +00:00
|
|
|
|
|
|
|
|
2002-05-17 18:36:07 +00:00
|
|
|
def readCard8(file):
	"""Read a 1-byte unsigned integer (CFF Card8) from *file*."""
	byte = file.read(1)
	return byteord(byte)
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-17 18:36:07 +00:00
|
|
|
def readCard16(file):
	"""Read a big-endian 2-byte unsigned integer (CFF Card16) from *file*."""
	data = file.read(2)
	return struct.unpack(">H", data)[0]
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
def readCard32(file):
	"""Read a big-endian 4-byte unsigned integer (CFF Card32) from *file*."""
	data = file.read(4)
	return struct.unpack(">L", data)[0]
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def writeCard8(file, value):
	"""Write *value* to *file* as a 1-byte unsigned integer (CFF Card8)."""
	packed = bytechr(value)
	file.write(packed)
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def writeCard16(file, value):
	"""Write *value* to *file* as a big-endian 2-byte unsigned integer."""
	packed = struct.pack(">H", value)
	file.write(packed)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
def writeCard32(file, value):
	"""Write *value* to *file* as a big-endian 4-byte unsigned integer."""
	packed = struct.pack(">L", value)
	file.write(packed)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def packCard8(value):
	"""Return *value* packed as a single byte (CFF Card8)."""
	return bytechr(value)
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def packCard16(value):
	"""Return *value* packed as a big-endian 2-byte unsigned integer."""
	return struct.pack(">H", value)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2019-08-01 21:28:46 +02:00
|
|
|
def packCard32(value):
	"""Return *value* packed as a big-endian 4-byte unsigned integer."""
	return struct.pack(">L", value)
|
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildOperatorDict(table):
	"""Map operator code -> (name, argument type) from an operator table."""
	return {op: (name, arg) for op, name, arg, _default, _conv in table}
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def buildOpcodeDict(table):
	"""Map operator name -> (packed opcode bytes, argument type)."""
	d = {}
	for op, name, arg, _default, _conv in table:
		if isinstance(op, tuple):
			# two-byte (escaped) operator
			packed = bytechr(op[0]) + bytechr(op[1])
		else:
			packed = bytechr(op)
		d[name] = (packed, arg)
	return d
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildOrder(table):
	"""Return the operator names of *table* in declaration order."""
	return [name for _op, name, _arg, _default, _conv in table]
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildDefaults(table):
	"""Map operator name -> default value, skipping entries without one."""
	return {
		name: default
		for _op, name, _arg, default, _conv in table
		if default is not None
	}
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
def buildConverters(table):
	"""Map operator name -> converter object from an operator table."""
	return {name: conv for _op, name, _arg, _default, conv in table}
|
|
|
|
|
|
|
|
|
2018-02-23 02:01:35 +09:00
|
|
|
class SimpleConverter(object):
	"""Base converter: passes values through unchanged between binary,
	Python and XML representations of a dict entry.

	Subclasses override _read/write/xmlWrite/xmlRead as needed.
	"""

	def read(self, parent, value):
		"""Convert *value*; if *parent* has an open file, restore its
		position afterwards (subclass readers may seek around in it)."""
		if not hasattr(parent, "file"):
			return self._read(parent, value)
		fileobj = parent.file
		saved = fileobj.tell()
		try:
			return self._read(parent, value)
		finally:
			fileobj.seek(saved)

	def _read(self, parent, value):
		# identity conversion by default
		return value

	def write(self, parent, value):
		# identity conversion by default
		return value

	def xmlWrite(self, xmlWriter, name, value):
		"""Write the value as a simple <name value=.../> element."""
		xmlWriter.simpletag(name, value=value)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		"""Read the value back from the element's "value" attribute."""
		return attrs["value"]
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 07:10:53 -05:00
|
|
|
class ASCIIConverter(SimpleConverter):
	"""Converter for values stored internally as ASCII byte strings."""

	def _read(self, parent, value):
		# binary -> Python: decode to a (unicode) string
		return tostr(value, encoding='ascii')

	def write(self, parent, value):
		# Python -> binary: encode back to bytes
		return tobytes(value, encoding='ascii')

	def xmlWrite(self, xmlWriter, name, value):
		xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii"))
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		# XML -> Python: keep as bytes, matching the binary representation
		return tobytes(attrs["value"], encoding=("ascii"))
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-09-09 14:18:39 +00:00
|
|
|
class Latin1Converter(SimpleConverter):
	"""Converter for values stored internally as Latin-1 byte strings."""

	def _read(self, parent, value):
		return tostr(value, encoding='latin1')

	def write(self, parent, value):
		return tobytes(value, encoding='latin1')

	def xmlWrite(self, xmlWriter, name, value):
		value = tounicode(value, encoding="latin1")
		if name in ['Notice', 'Copyright']:
			# collapse line breaks + indentation to single spaces so long
			# legal texts stay on one XML attribute line
			value = re.sub(r"[\r\n]\s+", " ", value)
		xmlWriter.simpletag(name, value=value)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return tobytes(attrs["value"], encoding=("latin1"))
|
2002-09-09 14:18:39 +00:00
|
|
|
|
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def parseNum(s):
	"""Parse a numeric string: return an int when possible, else a float.

	Raises ValueError for non-numeric input.
	"""
	try:
		value = int(s)
	except ValueError:
		# Not an integer literal (e.g. "1.5" or "1e3"): fall back to float.
		# The original bare `except:` also swallowed KeyboardInterrupt and
		# masked TypeErrors; only ValueError is expected here.
		value = float(s)
	return value
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
def parseBlendList(s):
	"""Parse XML content describing blend lists.

	Each child element's "value" attribute is a whitespace-separated
	list of numbers. Returns a list of numeric lists, or the single
	list itself when there is exactly one.
	"""
	valueList = []
	for element in s:
		if isinstance(element, basestring):
			# skip whitespace/text between elements
			continue
		name, attrs, content = element
		# Values are plain numbers; parse them with parseNum instead of
		# eval(), which would execute arbitrary expressions from the
		# XML input.
		blendList = [parseNum(val) for val in attrs["value"].split()]
		valueList.append(blendList)
	if len(valueList) == 1:
		valueList = valueList[0]
	return valueList
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class NumberConverter(SimpleConverter):
	"""Converter for single numeric dict values.

	In CFF2, a value may be a blend list (a list of numbers), which is
	written as a nested element named by kBlendDictOpName instead of a
	simple value attribute.
	"""

	def xmlWrite(self, xmlWriter, name, value):
		if isinstance(value, list):
			# blended value: emit a child element with the number list
			xmlWriter.begintag(name)
			xmlWriter.newline()
			xmlWriter.indent()
			blendValue = " ".join([str(val) for val in value])
			xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
			xmlWriter.newline()
			xmlWriter.dedent()
			xmlWriter.endtag(name)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(name, value=value)
			xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		valueString = attrs.get("value", None)
		if valueString is None:
			# no value attribute: must be a nested blend list
			value = parseBlendList(content)
		else:
			value = parseNum(attrs["value"])
		return value
|
2002-05-24 09:58:04 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class ArrayConverter(SimpleConverter):
	"""Converter for dict values that are lists of numbers.

	In CFF2, each entry may itself be a blend list; in that case the
	value is a list of lists and is written as nested elements named by
	kBlendDictOpName.
	"""

	def xmlWrite(self, xmlWriter, name, value):
		# `value` may be empty; only treat as blended when the first
		# entry is itself a list
		if value and isinstance(value[0], list):
			xmlWriter.begintag(name)
			xmlWriter.newline()
			xmlWriter.indent()
			for valueList in value:
				blendValue = " ".join([str(val) for val in valueList])
				xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
				xmlWriter.newline()
			xmlWriter.dedent()
			xmlWriter.endtag(name)
			xmlWriter.newline()
		else:
			value = " ".join([str(val) for val in value])
			xmlWriter.simpletag(name, value=value)
			xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		valueString = attrs.get("value", None)
		if valueString is None:
			# no value attribute: must be nested blend lists
			valueList = parseBlendList(content)
		else:
			values = valueString.split()
			valueList = [parseNum(value) for value in values]
		return valueList
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class TableConverter(SimpleConverter):
	"""Converter whose value is a compound object (dict, index, ...)
	serialized as a nested XML element.

	Subclasses provide getClass() to name the object type.
	"""

	def xmlWrite(self, xmlWriter, name, value):
		xmlWriter.begintag(name)
		xmlWriter.newline()
		value.toXML(xmlWriter)
		xmlWriter.endtag(name)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		ob = self.getClass()()
		for element in content:
			# skip whitespace/text nodes between child elements
			if isinstance(element, basestring):
				continue
			childName, childAttrs, childContent = element
			ob.fromXML(childName, childAttrs, childContent)
		return ob
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class PrivateDictConverter(TableConverter):
	"""Converter for the Private dict, stored as a (size, offset) pair."""

	def getClass(self):
		return PrivateDict

	def _read(self, parent, value):
		# value is the (size, offset) operand pair from the parent dict
		size, offset = value
		file = parent.file
		isCFF2 = parent._isCFF2
		try:
			vstore = parent.vstore
		except AttributeError:
			# non-CFF2 parents have no variation store
			vstore = None
		priv = PrivateDict(
			parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
		file.seek(offset)
		data = file.read(size)
		assert len(data) == size
		priv.decompile(data)
		return priv

	def write(self, parent, value):
		# real (size, offset) is patched in later by the compiler
		return (0, 0)  # dummy value
|
2017-06-21 10:32:58 +01:00
|
|
|
|
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class SubrsConverter(TableConverter):
	"""Converter for the local Subrs index, referenced by a self-relative
	offset from the Private dict."""

	def getClass(self):
		return SubrsIndex

	def _read(self, parent, value):
		file = parent.file
		isCFF2 = parent._isCFF2
		# the offset is relative to the start of the Private dict
		file.seek(parent.offset + value)  # Offset(self)
		return SubrsIndex(file, isCFF2=isCFF2)

	def write(self, parent, value):
		# real offset is patched in later by the compiler
		return 0  # dummy value
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class CharStringsConverter(TableConverter):
	"""Converter for the CharStrings index, referenced by an absolute
	offset from the top dict."""

	def _read(self, parent, value):
		file = parent.file
		isCFF2 = parent._isCFF2
		charset = parent.charset
		globalSubrs = parent.GlobalSubrs
		if hasattr(parent, "FDArray"):
			# CID-keyed (or CFF2) font: per-glyph Private dicts come from
			# the FDArray, optionally selected through FDSelect.
			fdArray = parent.FDArray
			if hasattr(parent, "FDSelect"):
				fdSelect = parent.FDSelect
			else:
				fdSelect = None
			private = None
		else:
			# name-keyed font: single Private dict on the top dict
			fdSelect, fdArray = None, None
			private = parent.Private
		file.seek(value)  # Offset(0)
		charStrings = CharStrings(
			file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
		return charStrings

	def write(self, parent, value):
		# real offset is patched in later by the compiler
		return 0  # dummy value

	def xmlRead(self, name, attrs, content, parent):
		if hasattr(parent, "FDArray"):
			# if it is a CID-keyed font, then the private Dict is extracted from the
			# parent.FDArray
			fdArray = parent.FDArray
			if hasattr(parent, "FDSelect"):
				fdSelect = parent.FDSelect
			else:
				fdSelect = None
			private = None
		else:
			# if it is a name-keyed font, then the private dict is in the top dict,
			# and
			# there is no fdArray.
			private, fdSelect, fdArray = parent.Private, None, None
		charStrings = CharStrings(
			None, None, parent.GlobalSubrs, private, fdSelect, fdArray)
		charStrings.fromXML(name, attrs, content)
		return charStrings
|
2002-05-16 18:17:32 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2018-02-23 02:01:35 +09:00
|
|
|
class CharsetConverter(SimpleConverter):

    """Converts the 'charset' top-dict operand between its binary form
    (a file offset to a charset table, or a predefined charset id 0-2)
    and a Python list of glyph names."""

    def _read(self, parent, value):
        isCID = hasattr(parent, "ROS")
        if value > 2:
            # value is a real file offset to a charset table
            numGlyphs = parent.numGlyphs
            file = parent.file
            file.seek(value)
            log.log(DEBUG, "loading charset at %s", value)
            format = readCard8(file)
            if format == 0:
                charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
            elif format == 1 or format == 2:
                charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
            else:
                raise NotImplementedError
            assert len(charset) == numGlyphs
            log.log(DEBUG, "    charset end at %s", file.tell())
            # make sure glyph names are unique: a duplicate gets a "#n"
            # suffix so names can be used as dictionary keys
            allNames = {}
            newCharset = []
            for glyphName in charset:
                if glyphName in allNames:
                    # make up a new glyphName that's unique
                    n = allNames[glyphName]
                    while (glyphName + "#" + str(n)) in allNames:
                        n += 1
                    allNames[glyphName] = n + 1
                    glyphName = glyphName + "#" + str(n)
                allNames[glyphName] = 1
                newCharset.append(glyphName)
            charset = newCharset
        else:  # offset == 0 -> no charset data.
            if isCID or "CharStrings" not in parent.rawDict:
                # We get here only when processing fontDicts from the FDArray
                # of CFF-CID fonts. Only the real topDict references the
                # charset.
                assert value == 0
                charset = None
            elif value == 0:
                # predefined charsets
                charset = cffISOAdobeStrings
            elif value == 1:
                charset = cffIExpertStrings
            elif value == 2:
                charset = cffExpertSubsetStrings
        if charset and (len(charset) != parent.numGlyphs):
            # trim a predefined charset down to the actual glyph count
            charset = charset[:parent.numGlyphs]
        return charset

    def write(self, parent, value):
        # Real offset is patched in later by the compiler via setPos().
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        # XXX only write charset when not in OT/TTX context, where we
        # dump charset as a separate "GlyphOrder" table.
        # # xmlWriter.simpletag("charset")
        xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        # Nothing to do: the charset is rebuilt from the GlyphOrder table.
        pass
|
1999-12-16 21:34:53 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CharsetCompiler(object):

    """Compiles a charset to binary, picking the more compact of
    format 0 and the range-based formats (1/2)."""

    def __init__(self, strings, charset, parent):
        assert charset[0] == '.notdef'
        isCID = hasattr(parent.dictObj, "ROS")
        fmt0Data = packCharset0(charset, isCID, strings)
        rangeData = packCharset(charset, isCID, strings)
        # prefer the range encoding only when it is strictly smaller
        self.data = rangeData if len(rangeData) < len(fmt0Data) else fmt0Data
        self.parent = parent

    def setPos(self, pos, endPos):
        # record where the charset landed in the output file
        self.parent.rawDict["charset"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2016-09-22 15:33:53 -07:00
|
|
|
def getStdCharSet(charset):
    """Return the predefined charset id (0, 1 or 2) whose leading names
    match *charset*, or None if no predefined charset can be used."""
    candidates = (
        (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
        (cffExpertStringCount, cffIExpertStrings, 1),
        (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
    )
    numNames = len(charset)
    for count, names, charsetId in candidates:
        if numNames > count:
            # charset is longer than this predefined set; cannot match
            continue
        if all(charset[i] == names[i] for i in range(numNames)):
            return charsetId
    return None
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
def getCIDfromName(name, strings):
    """Extract the numeric CID from a glyph name of the form 'cidNNNNN'.

    The *strings* argument is unused; it is accepted only so this
    function is interchangeable with getSIDfromName.
    """
    digits = name[3:]
    return int(digits)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
def getSIDfromName(name, strings):
    """Look up the SID for *name* via the string index.

    Signature-compatible with getCIDfromName so either can be used as a
    name-to-id callback.
    """
    return strings.getSID(name)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
def packCharset0(charset, isCID, strings):
    """Pack *charset* in CFF charset format 0: one Card16 id per glyph,
    with the implicit '.notdef' excluded."""
    getNameID = getCIDfromName if isCID else getSIDfromName
    chunks = [packCard8(0)]  # format byte
    chunks.extend(
        packCard16(getNameID(glyphName, strings))
        for glyphName in charset[1:])
    return bytesjoin(chunks)
|
2002-05-24 10:35:13 +00:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
def packCharset(charset, isCID, strings):
    """Pack *charset* using the range-based CFF charset formats.

    Format 1 (Card8 range length) is used unless some run of consecutive
    ids is longer than 256, in which case format 2 (Card16 length) is
    used. The implicit '.notdef' is not encoded.
    """
    fmt = 1
    ranges = []
    first = None
    end = 0
    if isCID:
        getNameID = getCIDfromName
    else:
        getNameID = getSIDfromName

    for name in charset[1:]:
        SID = getNameID(name, strings)
        if first is None:
            first = SID
        elif end + 1 != SID:
            # run of consecutive ids broken: flush the pending range
            nLeft = end - first
            if nLeft > 255:
                # too long for a Card8 count; must use format 2
                fmt = 2
            ranges.append((first, nLeft))
            first = SID
        end = SID
    if end:
        # flush the final pending range
        nLeft = end - first
        if nLeft > 255:
            fmt = 2
        ranges.append((first, nLeft))

    data = [packCard8(fmt)]
    if fmt == 1:
        nLeftFunc = packCard8
    else:
        nLeftFunc = packCard16
    for first, nLeft in ranges:
        data.append(packCard16(first) + nLeftFunc(nLeft))
    return bytesjoin(data)
|
2002-05-24 10:35:13 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2018-02-22 22:46:46 +09:00
|
|
|
def parseCharset0(numGlyphs, file, strings, isCID):
    """Parse a format-0 charset: numGlyphs-1 Card16 ids following the
    implicit '.notdef'."""
    charset = [".notdef"]
    if isCID:
        for _ in range(numGlyphs - 1):
            cid = readCard16(file)
            charset.append("cid" + str(cid).zfill(5))
    else:
        for _ in range(numGlyphs - 1):
            sid = readCard16(file)
            charset.append(strings[sid])
    return charset
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2018-02-22 22:46:46 +09:00
|
|
|
def parseCharset(numGlyphs, file, strings, isCID, fmt):
    """Parse a range-based charset (format 1 or 2)."""
    # format 1 stores the range length as Card8, format 2 as Card16
    readNLeft = readCard8 if fmt == 1 else readCard16
    charset = ['.notdef']
    count = 1
    while count < numGlyphs:
        first = readCard16(file)
        nLeft = readNLeft(file)
        ids = range(first, first + nLeft + 1)
        if isCID:
            charset.extend("cid" + str(cid).zfill(5) for cid in ids)
        else:
            charset.extend(strings[sid] for sid in ids)
        count += nLeft + 1
    return charset
|
1999-12-16 21:34:53 +00:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class EncodingCompiler(object):

    """Compiles a custom encoding, choosing whichever of format 0 and
    format 1 yields the smaller binary representation."""

    def __init__(self, strings, encoding, parent):
        # a string here would mean one of the predefined encodings,
        # which are written as plain operand values, not compiled
        assert not isinstance(encoding, basestring)
        charset = parent.dictObj.charset
        fmt0 = packEncoding0(charset, encoding, parent.strings)
        fmt1 = packEncoding1(charset, encoding, parent.strings)
        self.data = fmt0 if len(fmt0) < len(fmt1) else fmt1
        self.parent = parent

    def setPos(self, pos, endPos):
        self.parent.rawDict["Encoding"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
|
|
|
|
|
|
|
|
|
|
|
|
class EncodingConverter(SimpleConverter):

    """Converts the 'Encoding' top-dict operand between its binary form
    (0/1 for the predefined encodings, or a file offset to a custom
    encoding table) and either a name string or a 256-entry list."""

    def _read(self, parent, value):
        if value == 0:
            return "StandardEncoding"
        elif value == 1:
            return "ExpertEncoding"
        else:
            # value is a file offset to a custom encoding table
            assert value > 1
            file = parent.file
            file.seek(value)
            log.log(DEBUG, "loading Encoding at %s", value)
            fmt = readCard8(file)
            # high bit of the format byte flags supplemental mappings
            haveSupplement = fmt & 0x80
            if haveSupplement:
                raise NotImplementedError("Encoding supplements are not yet supported")
            fmt = fmt & 0x7f
            # NOTE(review): a format other than 0 or 1 would leave
            # 'encoding' unbound and raise NameError below
            if fmt == 0:
                encoding = parseEncoding0(parent.charset, file, haveSupplement,
                        parent.strings)
            elif fmt == 1:
                encoding = parseEncoding1(parent.charset, file, haveSupplement,
                        parent.strings)
            return encoding

    def write(self, parent, value):
        # predefined encodings are stored directly as operand values;
        # custom encodings get a real offset from the compiler later
        if value == "StandardEncoding":
            return 0
        elif value == "ExpertEncoding":
            return 1
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        if value in ("StandardEncoding", "ExpertEncoding"):
            xmlWriter.simpletag(name, name=value)
            xmlWriter.newline()
            return
        xmlWriter.begintag(name)
        xmlWriter.newline()
        for code in range(len(value)):
            glyphName = value[code]
            if glyphName != ".notdef":
                xmlWriter.simpletag("map", code=hex(code), name=glyphName)
                xmlWriter.newline()
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        # a 'name' attribute means one of the predefined encodings
        if "name" in attrs:
            return attrs["name"]
        encoding = [".notdef"] * 256
        for element in content:
            if isinstance(element, basestring):
                continue
            name, attrs, content = element
            code = safeEval(attrs["code"])
            glyphName = attrs["name"]
            encoding[code] = glyphName
        return encoding
|
|
|
|
|
|
|
|
|
2018-02-22 22:46:46 +09:00
|
|
|
def parseEncoding0(charset, file, haveSupplement, strings):
    """Parse a format-0 encoding table: one code byte per glyph, in
    glyph order starting after '.notdef'."""
    encoding = [".notdef"] * 256
    nCodes = readCard8(file)
    glyphID = 0
    while glyphID < nCodes:
        glyphID += 1
        code = readCard8(file)
        if code:
            # code 0 means the glyph is unencoded
            encoding[code] = charset[glyphID]
    return encoding
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2018-02-22 22:46:46 +09:00
|
|
|
def parseEncoding1(charset, file, haveSupplement, strings):
    """Parse a format-1 encoding table: ranges of consecutive codes.

    Each range maps nLeft+1 consecutive codes (starting at *code*) onto
    consecutive glyphs in charset order.
    """
    nRanges = readCard8(file)
    encoding = [".notdef"] * 256
    glyphID = 1
    for i in range(nRanges):
        code = readCard8(file)
        nLeft = readCard8(file)
        # NOTE: the loop deliberately resumes from the previous range's
        # final glyphID (plus the trailing increment below), so glyphs
        # advance continuously across ranges.
        for glyphID in range(glyphID, glyphID + nLeft + 1):
            encoding[code] = charset[glyphID]
            code = code + 1
        glyphID = glyphID + 1
    return encoding
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2003-01-03 20:56:01 +00:00
|
|
|
def packEncoding0(charset, encoding, strings):
    """Pack *encoding* in CFF Encoding format 0: one code per glyph, in
    charset order ('.notdef' excluded)."""
    # invert the code->name table; '.notdef' entries are unencoded
    codeForName = {}
    for code, glyphName in enumerate(encoding):
        if glyphName != ".notdef":
            codeForName[glyphName] = code
    codes = [codeForName.get(glyphName) for glyphName in charset[1:]]

    # trailing unencoded glyphs need not be written at all
    while codes and codes[-1] is None:
        codes.pop()

    data = [packCard8(0), packCard8(len(codes))]
    data.extend(packCard8(0 if code is None else code) for code in codes)
    return bytesjoin(data)
|
2003-01-03 20:56:01 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2003-01-03 20:56:01 +00:00
|
|
|
def packEncoding1(charset, encoding, strings):
    """Pack *encoding* in CFF Encoding format 1: ranges of consecutive
    codes, one range per run of consecutively-coded glyphs in charset
    order ('.notdef' excluded)."""
    fmt = 1
    m = {}
    for code in range(len(encoding)):
        name = encoding[code]
        if name != ".notdef":
            m[name] = code
    ranges = []
    first = None
    end = 0
    for name in charset[1:]:
        code = m.get(name, -1)  # -1 marks an unencoded glyph
        if first is None:
            first = code
        elif end + 1 != code:
            # run of consecutive codes broken: flush the pending range
            nLeft = end - first
            ranges.append((first, nLeft))
            first = code
        end = code
    if first is not None:
        # flush the final pending range; the guard protects against a
        # charset containing only '.notdef', where no range was ever
        # started (previously raised TypeError on `end - None`), and
        # mirrors the end-guard in packCharset
        nLeft = end - first
        ranges.append((first, nLeft))

    # remove unencoded glyphs at the end.
    while ranges and ranges[-1][0] == -1:
        ranges.pop()

    data = [packCard8(fmt), packCard8(len(ranges))]
    for first, nLeft in ranges:
        if first == -1:  # unencoded
            first = 0
        data.append(packCard8(first) + packCard8(nLeft))
    return bytesjoin(data)
|
2003-01-03 20:56:01 +00:00
|
|
|
|
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class FDArrayConverter(TableConverter):

    """Converts the 'FDArray' top-dict operand: an INDEX of font dicts
    used by CID-keyed and CFF2 fonts."""

    def _read(self, parent, value):
        # CFF2 variable fonts may carry a VarStore the font dicts need
        try:
            vstore = parent.VarStore
        except AttributeError:
            vstore = None
        file = parent.file
        isCFF2 = parent._isCFF2
        file.seek(value)
        fdArray = FDArrayIndex(file, isCFF2=isCFF2)
        fdArray.vstore = vstore
        fdArray.strings = parent.strings
        fdArray.GlobalSubrs = parent.GlobalSubrs
        return fdArray

    def write(self, parent, value):
        # real offset is patched in by the compiler via setPos()
        return 0  # dummy value

    def xmlRead(self, name, attrs, content, parent):
        fdArray = FDArrayIndex()
        for element in content:
            # skip whitespace/text nodes between child elements
            if isinstance(element, basestring):
                continue
            name, attrs, content = element
            fdArray.fromXML(name, attrs, content)
        return fdArray
|
|
|
|
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2018-02-23 02:01:35 +09:00
|
|
|
class FDSelectConverter(SimpleConverter):

    """Converts the 'FDSelect' operand, which maps each glyph to a font
    dict in the FDArray of a CID-keyed font."""

    def _read(self, parent, value):
        file = parent.file
        file.seek(value)
        return FDSelect(file, parent.numGlyphs)

    def write(self, parent, value):
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        # The per-glyph FD indices are dumped to XML alongside the
        # charstrings, so only the format selector is written here.
        xmlWriter.simpletag(name, [('format', value.format)])
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        fmt = safeEval(attrs["format"])
        # glyph data arrives later via the charstrings, hence no file
        # and no glyph count yet
        return FDSelect(None, None, fmt)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
class VarStoreConverter(SimpleConverter):

    """Converts the CFF2 'VarStore' operand (variation store data)."""

    def _read(self, parent, value):
        fileObj = parent.file
        fileObj.seek(value)
        store = VarStoreData(fileObj)
        store.decompile()
        return store

    def write(self, parent, value):
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        # VarStoreData knows how to serialize itself
        value.writeXML(xmlWriter, name)

    def xmlRead(self, name, attrs, content, parent):
        store = VarStoreData()
        store.xmlRead(name, attrs, content, parent)
        return store
|
2017-03-09 21:30:28 -08:00
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
def packFDSelect0(fdSelectArray):
    """Pack FDSelect in format 0: one Card8 FD index per glyph."""
    chunks = [packCard8(0)]  # format byte
    chunks.extend(packCard8(fdIndex) for fdIndex in fdSelectArray)
    return bytesjoin(chunks)
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
def packFDSelect3(fdSelectArray):
    """Pack FDSelect in format 3: ranges of glyphs sharing an FD index.

    Each range is (first GID as Card16, FD index as Card8); a Card16
    sentinel GID (one past the last glyph) terminates the list.
    """
    fmt = 3
    fdRanges = []
    lenArray = len(fdSelectArray)
    lastFDIndex = -1
    for i in range(lenArray):
        fdIndex = fdSelectArray[i]
        if lastFDIndex != fdIndex:
            # start a new range whenever the FD index changes
            fdRanges.append([i, fdIndex])
            lastFDIndex = fdIndex
    # Compute the sentinel from the array length instead of the loop
    # variable: an empty fdSelectArray previously raised NameError
    # because `i` was never bound.
    sentinelGID = lenArray

    data = [packCard8(fmt)]
    data.append(packCard16(len(fdRanges)))
    for fdRange in fdRanges:
        data.append(packCard16(fdRange[0]))
        data.append(packCard8(fdRange[1]))
    data.append(packCard16(sentinelGID))
    return bytesjoin(data)
|
2003-08-22 19:53:32 +00:00
|
|
|
|
|
|
|
|
2019-08-01 21:28:46 +02:00
|
|
|
def packFDSelect4(fdSelectArray):
    """Pack FDSelect in format 4 (CFF2): like format 3 but with Card32
    glyph ids and Card16 FD indices."""
    fmt = 4
    fdRanges = []
    lenArray = len(fdSelectArray)
    lastFDIndex = -1
    for i in range(lenArray):
        fdIndex = fdSelectArray[i]
        if lastFDIndex != fdIndex:
            # start a new range whenever the FD index changes
            fdRanges.append([i, fdIndex])
            lastFDIndex = fdIndex
    # Compute the sentinel from the array length instead of the loop
    # variable: an empty fdSelectArray previously raised NameError
    # because `i` was never bound.
    sentinelGID = lenArray

    data = [packCard8(fmt)]
    data.append(packCard32(len(fdRanges)))
    for fdRange in fdRanges:
        data.append(packCard32(fdRange[0]))
        data.append(packCard16(fdRange[1]))
    data.append(packCard32(sentinelGID))
    return bytesjoin(data)
|
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class FDSelectCompiler(object):

    """Compiles an FDSelect structure to binary, honoring an explicitly
    requested format or otherwise choosing the smaller of formats 0
    and 3."""

    def __init__(self, fdSelect, parent):
        fmt = fdSelect.format
        fdSelectArray = fdSelect.gidArray
        if fmt == 0:
            self.data = packFDSelect0(fdSelectArray)
        elif fmt == 3:
            self.data = packFDSelect3(fdSelectArray)
        elif fmt == 4:
            self.data = packFDSelect4(fdSelectArray)
        else:
            # choose smaller of the two formats
            data0 = packFDSelect0(fdSelectArray)
            data3 = packFDSelect3(fdSelectArray)
            if len(data0) < len(data3):
                self.data = data0
                # record the chosen format back on the FDSelect object
                fdSelect.format = 0
            else:
                self.data = data3
                fdSelect.format = 3

        self.parent = parent

    def setPos(self, pos, endPos):
        self.parent.rawDict["FDSelect"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
class VarStoreCompiler(object):

    """Compiles a VarStoreData object: a Card16 length prefix followed
    by the raw variation-store bytes."""

    def __init__(self, varStoreData, parent):
        self.parent = parent
        if not varStoreData.data:
            # lazily compile the store on first use
            varStoreData.compile()
        body = varStoreData.data
        self.data = bytesjoin([packCard16(len(body)), body])

    def setPos(self, pos, endPos):
        self.parent.rawDict["VarStore"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
|
2002-05-17 18:36:07 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
class ROSConverter(SimpleConverter):

    """Converts the 'ROS' (Registry, Ordering, Supplement) operand of
    CID-keyed fonts to and from XML."""

    def xmlWrite(self, xmlWriter, name, value):
        registry, order, supplement = value
        tagAttrs = [
            ('Registry', tostr(registry)),
            ('Order', tostr(order)),
            ('Supplement', supplement),
        ]
        xmlWriter.simpletag(name, tagAttrs)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        registry = attrs['Registry']
        order = attrs['Order']
        supplement = safeEval(attrs['Supplement'])
        return (registry, order, supplement)
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
# Operators allowed in a CFF (version 1) top dict. Entries with a None
# converter get a default one assigned by addConverters() below.
topDictOperators = [
    # opcode name argument type default converter
    (25, 'maxstack', 'number', None, None),
    ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()),
    ((12, 20), 'SyntheticBase', 'number', None, None),
    (0, 'version', 'SID', None, None),
    (1, 'Notice', 'SID', None, Latin1Converter()),
    ((12, 0), 'Copyright', 'SID', None, Latin1Converter()),
    (2, 'FullName', 'SID', None, None),
    ((12, 38), 'FontName', 'SID', None, None),
    (3, 'FamilyName', 'SID', None, None),
    (4, 'Weight', 'SID', None, None),
    ((12, 1), 'isFixedPitch', 'number', 0, None),
    ((12, 2), 'ItalicAngle', 'number', 0, None),
    ((12, 3), 'UnderlinePosition', 'number', -100, None),
    ((12, 4), 'UnderlineThickness', 'number', 50, None),
    ((12, 5), 'PaintType', 'number', 0, None),
    ((12, 6), 'CharstringType', 'number', 2, None),
    ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
    (13, 'UniqueID', 'number', None, None),
    (5, 'FontBBox', 'array', [0, 0, 0, 0], None),
    ((12, 8), 'StrokeWidth', 'number', 0, None),
    (14, 'XUID', 'array', None, None),
    ((12, 21), 'PostScript', 'SID', None, None),
    ((12, 22), 'BaseFontName', 'SID', None, None),
    ((12, 23), 'BaseFontBlend', 'delta', None, None),
    ((12, 31), 'CIDFontVersion', 'number', 0, None),
    ((12, 32), 'CIDFontRevision', 'number', 0, None),
    ((12, 33), 'CIDFontType', 'number', 0, None),
    ((12, 34), 'CIDCount', 'number', 8720, None),
    (15, 'charset', 'number', None, CharsetConverter()),
    ((12, 35), 'UIDBase', 'number', None, None),
    (16, 'Encoding', 'number', 0, EncodingConverter()),
    (18, 'Private', ('number', 'number'), None, PrivateDictConverter()),
    ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
    ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
    (17, 'CharStrings', 'number', None, CharStringsConverter()),
    (24, 'VarStore', 'number', None, VarStoreConverter()),
]
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
# Operators allowed in a CFF2 top dict (a small subset of the CFF ones:
# CFF2 drops names, metrics and predefined encodings/charsets).
topDictOperators2 = [
    # opcode name argument type default converter
    (25, 'maxstack', 'number', None, None),
    ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
    ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
    ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
    (17, 'CharStrings', 'number', None, CharStringsConverter()),
    (24, 'VarStore', 'number', None, VarStoreConverter()),
]
|
|
|
|
|
2003-08-22 19:53:32 +00:00
|
|
|
# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order,
|
|
|
|
# in order for the font to compile back from xml.
|
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
# 'blend' is handled as a pseudo-entry in the private-dict operator
# tables: it exists for the XML representation and is not written to
# binary CFF as a dict operator (see the table entries below).
kBlendDictOpName = "blend"
blendOp = 23
|
2003-08-22 19:53:32 +00:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
# Operators allowed in a CFF private dict.
privateDictOperators = [
    # opcode name argument type default converter
    (22, "vsindex", 'number', None, None),
    (blendOp, kBlendDictOpName, 'blendList', None, None),  # This is for reading to/from XML: it is not written to CFF.
    (6, 'BlueValues', 'delta', None, None),
    (7, 'OtherBlues', 'delta', None, None),
    (8, 'FamilyBlues', 'delta', None, None),
    (9, 'FamilyOtherBlues', 'delta', None, None),
    ((12, 9), 'BlueScale', 'number', 0.039625, None),
    ((12, 10), 'BlueShift', 'number', 7, None),
    ((12, 11), 'BlueFuzz', 'number', 1, None),
    (10, 'StdHW', 'number', None, None),
    (11, 'StdVW', 'number', None, None),
    ((12, 12), 'StemSnapH', 'delta', None, None),
    ((12, 13), 'StemSnapV', 'delta', None, None),
    ((12, 14), 'ForceBold', 'number', 0, None),
    ((12, 15), 'ForceBoldThreshold', 'number', None, None),  # deprecated
    ((12, 16), 'lenIV', 'number', None, None),  # deprecated
    ((12, 17), 'LanguageGroup', 'number', 0, None),
    ((12, 18), 'ExpansionFactor', 'number', 0.06, None),
    ((12, 19), 'initialRandomSeed', 'number', 0, None),
    (20, 'defaultWidthX', 'number', 0, None),
    (21, 'nominalWidthX', 'number', 0, None),
    (19, 'Subrs', 'number', None, SubrsConverter()),
]
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
# Operators allowed in a CFF2 private dict (a subset of the CFF ones).
privateDictOperators2 = [
    # opcode name argument type default converter
    (22, "vsindex", 'number', None, None),
    (blendOp, kBlendDictOpName, 'blendList', None, None),  # This is for reading to/from XML: it is not written to CFF.
    (6, 'BlueValues', 'delta', None, None),
    (7, 'OtherBlues', 'delta', None, None),
    (8, 'FamilyBlues', 'delta', None, None),
    (9, 'FamilyOtherBlues', 'delta', None, None),
    ((12, 9), 'BlueScale', 'number', 0.039625, None),
    ((12, 10), 'BlueShift', 'number', 7, None),
    ((12, 11), 'BlueFuzz', 'number', 1, None),
    (10, 'StdHW', 'number', None, None),
    (11, 'StdVW', 'number', None, None),
    ((12, 12), 'StemSnapH', 'delta', None, None),
    ((12, 13), 'StemSnapV', 'delta', None, None),
    (19, 'Subrs', 'number', None, SubrsConverter()),
]
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def addConverters(table):
    """Fill in a default converter for every operator-table entry whose
    converter slot is None, based on the entry's argument type.

    The table is modified in place.
    """
    for i, (op, name, arg, default, conv) in enumerate(table):
        if conv is not None:
            continue
        if arg in ("delta", "array"):
            conv = ArrayConverter()
        elif arg == "number":
            conv = NumberConverter()
        elif arg == "SID":
            conv = ASCIIConverter()
        elif arg == 'blendList':
            # 'blend' pseudo-entries deliberately keep no converter
            conv = None
        else:
            assert False
        table[i] = (op, name, arg, default, conv)
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
# Resolve the remaining None converters in the CFF operator tables.
addConverters(privateDictOperators)
addConverters(topDictOperators)
|
|
|
|
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class TopDictDecompiler(psCharStrings.DictDecompiler):
    # operator dispatch table built from the CFF top-dict operator list
    operators = buildOperatorDict(topDictOperators)
|
2002-05-15 07:41:30 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
    # operator dispatch table built from the CFF private-dict operator list
    operators = buildOperatorDict(privateDictOperators)
|
2002-05-14 12:22:03 +00:00
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class DictCompiler(object):
    # Base for the dict compilers (top/private/font dicts).
    # NOTE(review): assumed to track the deepest blend-operand stack
    # used during compilation — confirm against subclasses.
    maxBlendStack = 0
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
    def __init__(self, dictObj, strings, parent, isCFF2=None):
        """Prepare a compiler for *dictObj*.

        Builds self.rawDict: each dict value converted to its writable
        form, with entries equal to their default omitted.
        """
        if strings:
            assert isinstance(strings, IndexedStrings)
        if isCFF2 is None and hasattr(parent, "isCFF2"):
            # inherit the CFF version from the parent compiler
            isCFF2 = parent.isCFF2
            assert isCFF2 is not None
        self.isCFF2 = isCFF2
        self.dictObj = dictObj
        self.strings = strings
        self.parent = parent
        rawDict = {}
        for name in dictObj.order:
            value = getattr(dictObj, name, None)
            if value is None:
                continue
            conv = dictObj.converters[name]
            value = conv.write(dictObj, value)
            if value == dictObj.defaults.get(name):
                # values equal to the spec default need not be written
                continue
            rawDict[name] = value
        self.rawDict = rawDict
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def setPos(self, pos, endPos):
|
2002-05-23 21:50:36 +00:00
|
|
|
pass
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def getDataLength(self):
|
2002-05-24 09:58:04 +00:00
|
|
|
return len(self.compile("getDataLength"))
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-24 09:58:04 +00:00
|
|
|
def compile(self, reason):
|
2016-01-24 14:50:57 +00:00
|
|
|
log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
|
2002-05-23 21:50:36 +00:00
|
|
|
rawDict = self.rawDict
|
|
|
|
data = []
|
|
|
|
for name in self.dictObj.order:
|
|
|
|
value = rawDict.get(name)
|
|
|
|
if value is None:
|
|
|
|
continue
|
|
|
|
op, argType = self.opcodes[name]
|
2008-03-07 19:56:17 +00:00
|
|
|
if isinstance(argType, tuple):
|
2002-05-23 21:50:36 +00:00
|
|
|
l = len(argType)
|
|
|
|
assert len(value) == l, "value doesn't match arg type"
|
|
|
|
for i in range(l):
|
2003-08-22 19:53:32 +00:00
|
|
|
arg = argType[i]
|
2002-05-23 21:50:36 +00:00
|
|
|
v = value[i]
|
|
|
|
arghandler = getattr(self, "arg_" + arg)
|
|
|
|
data.append(arghandler(v))
|
|
|
|
else:
|
|
|
|
arghandler = getattr(self, "arg_" + argType)
|
|
|
|
data.append(arghandler(value))
|
|
|
|
data.append(op)
|
2017-01-12 15:23:12 -08:00
|
|
|
data = bytesjoin(data)
|
|
|
|
return data
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def toFile(self, file):
|
2017-01-12 15:23:12 -08:00
|
|
|
data = self.compile("toFile")
|
|
|
|
file.write(data)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def arg_number(self, num):
|
2017-01-12 15:23:12 -08:00
|
|
|
if isinstance(num, list):
|
2017-11-02 11:31:47 -07:00
|
|
|
data = [encodeNumber(val) for val in num]
|
2017-01-12 15:23:12 -08:00
|
|
|
data.append(encodeNumber(1))
|
|
|
|
data.append(bytechr(blendOp))
|
|
|
|
datum = bytesjoin(data)
|
|
|
|
else:
|
|
|
|
datum = encodeNumber(num)
|
|
|
|
return datum
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def arg_SID(self, s):
|
|
|
|
return psCharStrings.encodeIntCFF(self.strings.getSID(s))
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def arg_array(self, value):
|
|
|
|
data = []
|
|
|
|
for num in value:
|
2017-01-12 15:23:12 -08:00
|
|
|
data.append(self.arg_number(num))
|
2013-11-27 21:17:35 -05:00
|
|
|
return bytesjoin(data)
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def arg_delta(self, value):
|
[cffLib/psCharStrings] fix IndexError with empty deltas
After 2b2aca1, DictCompiler/Decompiler's `arg_delta` method unconditionally attempts to get the first item to check if it's a list, but this fails with `IndexError` when the value is empty.
```
Traceback (most recent call last):
[...]
File "/Users/cosimolupo/Documents/Github/ufo2ft/Lib/ufo2ft/otfPostProcessor.py", line 15, in __init__
otf.save(stream)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/ttLib/__init__.py", line 219, in save
self._writeTable(tag, writer, done)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/ttLib/__init__.py", line 658, in _writeTable
tabledata = self.getTableData(tag)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/ttLib/__init__.py", line 669, in getTableData
return self.tables[tag].compile(self)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/ttLib/tables/C_F_F_.py", line 20, in compile
self.cff.compile(f, otFont)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/cffLib.py", line 124, in compile
writer.toFile(file)
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/cffLib.py", line 300, in toFile
endPos = pos + item.getDataLength()
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/cffLib.py", line 1858, in getDataLength
return len(self.compile("getDataLength"))
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/cffLib.py", line 1879, in compile
data.append(arghandler(value))
File "/Users/cosimolupo/Documents/Github/fonttools/Lib/fontTools/cffLib.py", line 1910, in arg_delta
val0 = value[0]
IndexError: list index out of range
``
2017-03-09 14:21:38 +00:00
|
|
|
if not value:
|
|
|
|
return b""
|
2017-01-12 15:23:12 -08:00
|
|
|
val0 = value[0]
|
|
|
|
if isinstance(val0, list):
|
|
|
|
data = self.arg_delta_blend(value)
|
|
|
|
else:
|
|
|
|
out = []
|
|
|
|
last = 0
|
|
|
|
for v in value:
|
|
|
|
out.append(v - last)
|
|
|
|
last = v
|
|
|
|
data = []
|
|
|
|
for num in out:
|
|
|
|
data.append(encodeNumber(num))
|
2013-11-27 21:17:35 -05:00
|
|
|
return bytesjoin(data)
|
2002-05-23 21:50:36 +00:00
|
|
|
|
2017-11-02 11:31:47 -07:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
def arg_delta_blend(self, value):
|
2017-11-02 11:31:47 -07:00
|
|
|
""" A delta list with blend lists has to be *all* blend lists.
|
|
|
|
The value is a list is arranged as follows.
|
|
|
|
[
|
|
|
|
[V0, d0..dn]
|
|
|
|
[V1, d0..dn]
|
|
|
|
...
|
|
|
|
[Vm, d0..dn]
|
|
|
|
]
|
|
|
|
V is the absolute coordinate value from the default font, and d0-dn are
|
|
|
|
the delta values from the n regions. Each V is an absolute coordinate
|
|
|
|
from the default font.
|
|
|
|
We want to return a list:
|
|
|
|
[
|
|
|
|
[v0, v1..vm]
|
|
|
|
[d0..dn]
|
|
|
|
...
|
|
|
|
[d0..dn]
|
|
|
|
numBlends
|
|
|
|
blendOp
|
|
|
|
]
|
|
|
|
where each v is relative to the previous default font value.
|
|
|
|
"""
|
2017-01-12 15:23:12 -08:00
|
|
|
numMasters = len(value[0])
|
2017-11-02 11:31:47 -07:00
|
|
|
numBlends = len(value)
|
|
|
|
numStack = (numBlends * numMasters) + 1
|
2017-03-09 22:39:14 -08:00
|
|
|
if numStack > self.maxBlendStack:
|
|
|
|
# Figure out the max number of value we can blend
|
|
|
|
# and divide this list up into chunks of that size.
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
numBlendValues = int((self.maxBlendStack - 1) / numMasters)
|
2017-03-09 22:39:14 -08:00
|
|
|
out = []
|
|
|
|
while True:
|
|
|
|
numVal = min(len(value), numBlendValues)
|
|
|
|
if numVal == 0:
|
|
|
|
break
|
|
|
|
valList = value[0:numVal]
|
|
|
|
out1 = self.arg_delta_blend(valList)
|
|
|
|
out.extend(out1)
|
|
|
|
value = value[numVal:]
|
|
|
|
else:
|
2017-11-02 11:31:47 -07:00
|
|
|
firstList = [0] * numBlends
|
|
|
|
deltaList = [None] * numBlends
|
2017-03-09 22:39:14 -08:00
|
|
|
i = 0
|
2017-11-02 11:31:47 -07:00
|
|
|
prevVal = 0
|
|
|
|
while i < numBlends:
|
|
|
|
# For PrivateDict BlueValues, the default font
|
|
|
|
# values are absolute, not relative.
|
|
|
|
# Must convert these back to relative coordinates
|
|
|
|
# befor writing to CFF2.
|
|
|
|
defaultValue = value[i][0]
|
|
|
|
firstList[i] = defaultValue - prevVal
|
|
|
|
prevVal = defaultValue
|
|
|
|
deltaList[i] = value[i][1:]
|
2017-06-21 10:32:58 +01:00
|
|
|
i += 1
|
2017-03-09 22:39:14 -08:00
|
|
|
|
|
|
|
relValueList = firstList
|
|
|
|
for blendList in deltaList:
|
|
|
|
relValueList.extend(blendList)
|
|
|
|
out = [encodeNumber(val) for val in relValueList]
|
2017-11-02 11:31:47 -07:00
|
|
|
out.append(encodeNumber(numBlends))
|
2017-03-09 22:39:14 -08:00
|
|
|
out.append(bytechr(blendOp))
|
2017-01-12 15:23:12 -08:00
|
|
|
return out
|
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
|
|
|
|
def encodeNumber(num):
    """Encode a Python number as CFF DICT operand bytes.

    Floats become CFF real-number operands, everything else CFF integers.
    """
    if isinstance(num, float):
        encoder = psCharStrings.encodeFloat
    else:
        encoder = psCharStrings.encodeIntCFF
    return encoder(num)
|
|
|
|
|
|
|
|
|
|
|
|
class TopDictCompiler(DictCompiler):
    """Compiler for the Top DICT; also produces the child compilers
    (charset, encoding, VarStore, FDSelect, CharStrings, FDArray, Private)
    that must be written alongside it.
    """

    opcodes = buildOpcodeDict(topDictOperators)

    def getChildren(self, strings):
        """Return the list of compiler objects for all data this Top DICT
        points at, in the order they should be written."""
        isCFF2 = self.isCFF2
        children = []
        # cff2GetGlyphOrder is None for CFF 1.0 fonts; CFF2 drops charset
        # and Encoding but may carry a VarStore instead.
        if self.dictObj.cff2GetGlyphOrder is None:
            if hasattr(self.dictObj, "charset") and self.dictObj.charset:
                if hasattr(self.dictObj, "ROS"):  # aka isCID
                    charsetCode = None
                else:
                    charsetCode = getStdCharSet(self.dictObj.charset)
                if charsetCode is None:
                    # Custom charset: needs its own table.
                    children.append(CharsetCompiler(strings, self.dictObj.charset, self))
                else:
                    # One of the predefined charsets: store just its code.
                    self.rawDict["charset"] = charsetCode
            if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
                encoding = self.dictObj.Encoding
                # A string Encoding names a predefined encoding; only custom
                # encodings get compiled into a table.
                if not isinstance(encoding, basestring):
                    children.append(EncodingCompiler(strings, encoding, self))
        else:
            if hasattr(self.dictObj, "VarStore"):
                varStoreData = self.dictObj.VarStore
                varStoreComp = VarStoreCompiler(varStoreData, self)
                children.append(varStoreComp)
        if hasattr(self.dictObj, "FDSelect"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that
            # either the font was read from XML, and the FDSelect indices are all
            # in the charstring data, or the FDSelect array is already fully defined.
            fdSelect = self.dictObj.FDSelect
            # probably read in from XML; assume fdIndex in CharString data
            if len(fdSelect) == 0:
                charStrings = self.dictObj.CharStrings
                for name in self.dictObj.charset:
                    fdSelect.append(charStrings[name].fdSelectIndex)
            fdSelectComp = FDSelectCompiler(fdSelect, self)
            children.append(fdSelectComp)
        if hasattr(self.dictObj, "CharStrings"):
            items = []
            charStrings = self.dictObj.CharStrings
            # Write charstrings in charset (glyph order) sequence.
            for name in self.dictObj.charset:
                items.append(charStrings[name])
            charStringsComp = CharStringsCompiler(
                items, strings, self, isCFF2=isCFF2)
            children.append(charStringsComp)
        if hasattr(self.dictObj, "FDArray"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that the
            # FDArray info is correct and complete.
            fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
            children.append(fdArrayIndexComp)
            children.extend(fdArrayIndexComp.getChildren(strings))
        if hasattr(self.dictObj, "Private"):
            privComp = self.dictObj.Private.getCompiler(strings, self)
            children.append(privComp)
            children.extend(privComp.getChildren(strings))
        return children
|
|
|
|
|
|
|
|
|
2017-05-09 15:32:12 -07:00
|
|
|
class FontDictCompiler(DictCompiler):
    """Compiler for an FDArray FontDict. Shares the Top DICT opcode table,
    but FontDict.order restricts which keys are actually written."""

    opcodes = buildOpcodeDict(topDictOperators)

    def __init__(self, dictObj, strings, parent, isCFF2=None):
        super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
        #
        # We now take some effort to detect if there were any key/value pairs
        # supplied that were ignored in the FontDict context, and issue a warning
        # for those cases.
        #
        ignoredNames = []
        dictObj = self.dictObj
        # Keys that have a converter but are not in 'order' will never be
        # compiled; figure out which of those were actually supplied.
        for name in sorted(set(dictObj.converters) - set(dictObj.order)):
            if name in dictObj.rawDict:
                # The font was directly read from binary. In this
                # case, we want to report *all* "useless" key/value
                # pairs that are in the font, not just the ones that
                # are different from the default.
                ignoredNames.append(name)
            else:
                # The font was probably read from a TTX file. We only
                # warn about keys whose value is not the default. The
                # ones that have the default value will not be written
                # to binary anyway.
                default = dictObj.defaults.get(name)
                if default is not None:
                    conv = dictObj.converters[name]
                    default = conv.read(dictObj, default)
                if getattr(dictObj, name, None) != default:
                    ignoredNames.append(name)
        if ignoredNames:
            log.warning(
                "Some CFF FDArray/FontDict keys were ignored upon compile: " +
                " ".join(sorted(ignoredNames)))

    def getChildren(self, strings):
        # A FontDict's only child structure is its Private dict (plus
        # whatever that dict itself owns, e.g. local Subrs).
        children = []
        if hasattr(self.dictObj, "Private"):
            privComp = self.dictObj.Private.getCompiler(strings, self)
            children.append(privComp)
            children.extend(privComp.getChildren(strings))
        return children
|
2016-11-26 12:36:20 +01:00
|
|
|
|
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
class PrivateDictCompiler(DictCompiler):
    """Compiler for the Private DICT; supports CFF2 blend operands up to the
    full operand stack limit."""

    maxBlendStack = maxStackLimit
    opcodes = buildOpcodeDict(privateDictOperators)

    def setPos(self, pos, endPos):
        # The parent (Top/Font DICT) stores the Private dict as a
        # (size, offset) operand pair; record both once the position is known.
        size = endPos - pos
        self.parent.rawDict["Private"] = size, pos
        self.pos = pos

    def getChildren(self, strings):
        # Local subroutines, if present, are written as a child index.
        children = []
        if hasattr(self.dictObj, "Subrs"):
            children.append(self.dictObj.Subrs.getCompiler(strings, self))
        return children
|
|
|
|
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class BaseDict(object):
    """Common behavior for CFF dict objects (TopDict, FontDict, PrivateDict):
    lazy attribute access backed by rawDict + converters, plus XML I/O.
    """

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
        # When constructed without a file (e.g. building a font from
        # scratch or from XML), isCFF2 must be left unset as well.
        assert (isCFF2 is None) == (file is None)
        self.rawDict = {}
        self.skipNames = []
        self.strings = strings
        if file is None:
            return
        self._isCFF2 = isCFF2
        self.file = file
        if offset is not None:
            log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
        self.offset = offset

    def decompile(self, data):
        # Parse binary DICT data into rawDict via the class's decompiler.
        log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
        dec = self.decompilerClass(self.strings, self)
        dec.decompile(data)
        self.rawDict = dec.getDict()
        self.postDecompile()

    def postDecompile(self):
        # Hook for subclasses to run extra work after decompile().
        pass

    def getCompiler(self, strings, parent, isCFF2=None):
        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)

    def __getattr__(self, name):
        # Lazily convert raw dict values (or defaults) on first access and
        # cache the result via setattr.
        if name[:2] == name[-2:] == "__":
            # to make deepcopy() and pickle.load() work, we need to signal with
            # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
            # aren't implemented. For more details, see:
            # https://github.com/fonttools/fonttools/pull/1488
            raise AttributeError(name)
        value = self.rawDict.get(name, None)
        if value is None:
            value = self.defaults.get(name)
        if value is None:
            raise AttributeError(name)
        conv = self.converters[name]
        value = conv.read(self, value)
        setattr(self, name, value)
        return value

    def toXML(self, xmlWriter):
        """Write each entry (in canonical order, minus skipNames) as XML."""
        for name in self.order:
            if name in self.skipNames:
                continue
            value = getattr(self, name, None)
            # XXX For "charset" we never skip calling xmlWrite even if the
            # value is None, so we always write the following XML comment:
            #
            # <!-- charset is dumped separately as the 'GlyphOrder' element -->
            #
            # Charset is None when 'CFF ' table is imported from XML into an
            # empty TTFont(). By writing this comment all the time, we obtain
            # the same XML output whether roundtripping XML-to-XML or
            # dumping binary-to-XML
            if value is None and name != "charset":
                continue
            conv = self.converters[name]
            conv.xmlWrite(xmlWriter, name, value)
        ignoredNames = set(self.rawDict) - set(self.order)
        if ignoredNames:
            xmlWriter.comment(
                "some keys were ignored: %s" % " ".join(sorted(ignoredNames)))
            xmlWriter.newline()

    def fromXML(self, name, attrs, content):
        # Convert one XML element back into an attribute value.
        conv = self.converters[name]
        value = conv.xmlRead(name, attrs, content, self)
        setattr(self, name, value)
|
2002-05-14 12:22:03 +00:00
|
|
|
|
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class TopDict(BaseDict):
    """The Top DICT of a CFF/CFF2 font. For CFF2, defaults/order are swapped
    for the CFF2 operator tables and charset comes from the glyph order."""

    defaults = buildDefaults(topDictOperators)
    converters = buildConverters(topDictOperators)
    compilerClass = TopDictCompiler
    order = buildOrder(topDictOperators)
    decompilerClass = TopDictDecompiler

    def __init__(self, strings=None, file=None, offset=None,
            GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None):
        super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        # cff2GetGlyphOrder is a callable returning the glyph order; it is
        # None for CFF 1.0 fonts.
        self.cff2GetGlyphOrder = cff2GetGlyphOrder
        self.GlobalSubrs = GlobalSubrs
        if isCFF2:
            self.defaults = buildDefaults(topDictOperators2)
            # CFF2 has no charset table; the glyph order serves instead.
            self.charset = cff2GetGlyphOrder()
            self.order = buildOrder(topDictOperators2)
        else:
            self.defaults = buildDefaults(topDictOperators)
            self.order = buildOrder(topDictOperators)

    def getGlyphOrder(self):
        return self.charset

    def postDecompile(self):
        offset = self.rawDict.get("CharStrings")
        if offset is None:
            return
        # get the number of glyphs beforehand.
        self.file.seek(offset)
        # CFF2 CharStrings INDEX uses a 32-bit count, CFF 1.0 a 16-bit one.
        if self._isCFF2:
            self.numGlyphs = readCard32(self.file)
        else:
            self.numGlyphs = readCard16(self.file)

    def toXML(self, xmlWriter):
        if hasattr(self, "CharStrings"):
            self.decompileAllCharStrings()
        if hasattr(self, "ROS"):
            self.skipNames = ['Encoding']
        if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
            # these values have default values, but I only want them to show up
            # in CID fonts.
            self.skipNames = [
                'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount']
        BaseDict.toXML(self, xmlWriter)

    def decompileAllCharStrings(self):
        # Make sure that all the Private Dicts have been instantiated.
        for i, charString in enumerate(self.CharStrings.values()):
            try:
                charString.decompile()
            except:
                # Log which charstring failed, then re-raise unchanged.
                log.error("Error in charstring %s", i)
                raise

    def recalcFontBBox(self):
        """Recompute FontBBox as the union of all charstring bounds;
        fall back to the spec default when no glyph has an outline."""
        fontBBox = None
        for charString in self.CharStrings.values():
            bounds = charString.calcBounds(self.CharStrings)
            if bounds is not None:
                if fontBBox is not None:
                    fontBBox = unionRect(fontBBox, bounds)
                else:
                    fontBBox = bounds

        if fontBBox is None:
            self.FontBBox = self.defaults['FontBBox'][:]
        else:
            self.FontBBox = list(intRect(fontBBox))
|
2017-05-18 17:58:39 +09:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
|
2017-05-09 15:32:12 -07:00
|
|
|
class FontDict(BaseDict):
    #
    # Since fonttools used to pass a lot of fields that are not relevant in the FDArray
    # FontDict, there are 'ttx' files in the wild that contain all these. These got in
    # the ttx files because fonttools writes explicit values for all the TopDict default
    # values. These are not actually illegal in the context of an FDArray FontDict - you
    # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
    # useless since current major company CFF interpreters ignore anything but the set
    # listed in this file. So, we just silently skip them. An exception is Weight: this
    # is not used by any interpreter, but some foundries have asked that this be
    # supported in FDArray FontDicts just to preserve information about the design when
    # the font is being inspected.
    #
    # On top of that, there are fonts out there that contain such useless FontDict values.
    #
    # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
    # from binary or when reading from XML, but by overriding `order` with a limited
    # list of names, we ensure that only the useful names ever get exported to XML and
    # ever get compiled into the binary font.
    #
    # We override compilerClass so we can warn about "useless" key/value pairs, either
    # from the original binary font or from TTX input.
    #
    # See:
    # - https://github.com/fonttools/fonttools/issues/740
    # - https://github.com/fonttools/fonttools/issues/601
    # - https://github.com/adobe-type-tools/afdko/issues/137
    #
    defaults = {}
    converters = buildConverters(topDictOperators)
    compilerClass = FontDictCompiler
    # Keys actually written out, per format (see comment above).
    orderCFF = ['FontName', 'FontMatrix', 'Weight', 'Private']
    orderCFF2 = ['Private']
    decompilerClass = TopDictDecompiler

    def __init__(self, strings=None, file=None, offset=None,
            GlobalSubrs=None, isCFF2=None, vstore=None):
        super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.vstore = vstore
        self.setCFF2(isCFF2)

    def setCFF2(self, isCFF2):
        # isCFF2 may be None.
        # Choose which key list is in effect; None is treated as CFF 1.0.
        if isCFF2:
            self.order = self.orderCFF2
            self._isCFF2 = True
        else:
            self.order = self.orderCFF
            self._isCFF2 = False
|
2017-07-19 18:18:58 +01:00
|
|
|
|
2017-01-12 15:23:12 -08:00
|
|
|
|
2002-05-16 18:17:32 +00:00
|
|
|
class PrivateDict(BaseDict):
    """The Private DICT of a CFF/CFF2 font (or of a CID FontDict)."""

    defaults = buildDefaults(privateDictOperators)
    converters = buildConverters(privateDictOperators)
    order = buildOrder(privateDictOperators)
    decompilerClass = PrivateDictDecompiler
    compilerClass = PrivateDictCompiler

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None,
            vstore=None):
        super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        # vstore is the CFF2 VarStore data wrapper; None for CFF 1.0.
        self.vstore = vstore
        if isCFF2:
            self.defaults = buildDefaults(privateDictOperators2)
            self.order = buildOrder(privateDictOperators2)
            # Provide dummy values. This avoids needing to provide
            # an isCFF2 state in a lot of places.
            self.nominalWidthX = self.defaultWidthX = None
        else:
            self.defaults = buildDefaults(privateDictOperators)
            self.order = buildOrder(privateDictOperators)

    @property
    def in_cff2(self):
        # True when this dict belongs to a CFF2 font.
        return self._isCFF2

    def getNumRegions(self, vi=None):  # called from misc/psCharStrings.py
        # if getNumRegions is being called, we can assume that VarStore exists.
        if vi is None:
            # Use this dict's vsindex if set, else the default item
            # variation data index 0.
            if hasattr(self, 'vsindex'):
                vi = self.vsindex
            else:
                vi = 0
        numRegions = self.vstore.getNumRegions(vi)
        return numRegions
|
2017-06-21 10:32:58 +01:00
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class IndexedStrings(object):
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-17 07:06:32 +00:00
|
|
|
"""SID -> string mapping."""
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2017-07-19 18:18:58 +01:00
|
|
|
def __init__(self, file=None):
|
2002-05-17 07:06:32 +00:00
|
|
|
if file is None:
|
2002-05-14 12:22:03 +00:00
|
|
|
strings = []
|
2002-05-17 07:06:32 +00:00
|
|
|
else:
|
2017-07-19 18:18:58 +01:00
|
|
|
strings = [
|
|
|
|
tostr(s, encoding="latin1")
|
|
|
|
for s in Index(file, isCFF2=False)
|
|
|
|
]
|
2002-05-14 12:22:03 +00:00
|
|
|
self.strings = strings
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def getCompiler(self):
|
2017-07-19 18:18:58 +01:00
|
|
|
return IndexedStringsCompiler(self, None, self, isCFF2=False)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-23 21:50:36 +00:00
|
|
|
def __len__(self):
|
|
|
|
return len(self.strings)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-14 12:22:03 +00:00
|
|
|
def __getitem__(self, SID):
|
|
|
|
if SID < cffStandardStringCount:
|
|
|
|
return cffStandardStrings[SID]
|
|
|
|
else:
|
|
|
|
return self.strings[SID - cffStandardStringCount]
|
2017-06-21 10:32:58 +01:00
|
|
|
|
2002-05-14 12:22:03 +00:00
|
|
|
def getSID(self, s):
|
|
|
|
if not hasattr(self, "stringMapping"):
|
|
|
|
self.buildStringMapping()
|
cffLib: use tostr() when getting SID of indexed string (or we end up with duplicates in py3)
I noticed this issue while porting compreffor to py3. In my test fonts, the binary
CFF tables as generated with python 2 sometimes were slightly different from the
ones generated with python 3, although the TTX dump was identical!
It turns out, when running in Python 3, cffLib adds extra entries to the
list of CFF indexed strings, because of bytes vs str.
The `IndexedStrings.getSID` method takes an input string 's' and and returns
the SID integer for that string. If it's a new string, it gets appended to the
list, as well as to an internal strings-to-SID mapping, so that the same SID
value is returned for any given string.
The problem with python 3 was that, if the input string was of `bytes` type
instead of `str`, then the test for inclusion (the dict's `__contains__`)
would return False, and as a result the "same" string (e.g. "Regular" and
b"Regular") could be encoded twice in the list of CFF strings.
(yes, we desperately need unit tests for cffLib...)
2016-05-16 13:10:39 +01:00
|
|
|
s = tostr(s, encoding="latin1")
|
2013-11-27 02:33:03 -05:00
|
|
|
if s in cffStandardStringMapping:
|
2002-05-14 12:22:03 +00:00
|
|
|
SID = cffStandardStringMapping[s]
|
2013-11-27 02:33:03 -05:00
|
|
|
elif s in self.stringMapping:
|
2002-05-14 12:22:03 +00:00
|
|
|
SID = self.stringMapping[s]
|
|
|
|
else:
|
|
|
|
SID = len(self.strings) + cffStandardStringCount
|
|
|
|
self.strings.append(s)
|
|
|
|
self.stringMapping[s] = SID
|
|
|
|
return SID
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-14 12:22:03 +00:00
|
|
|
def getStrings(self):
|
|
|
|
return self.strings
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-14 12:22:03 +00:00
|
|
|
def buildStringMapping(self):
	"""(Re)build ``self.stringMapping``: custom string -> SID.

	Custom strings are numbered consecutively after the standard strings,
	so the SID of ``self.strings[i]`` is ``i + cffStandardStringCount``.
	"""
	# enumerate() replaces the old range(len(...)) index loop; for
	# duplicate strings the last index wins, exactly as before.
	self.stringMapping = {
		s: index + cffStandardStringCount
		for index, s in enumerate(self.strings)
	}
|
|
|
|
|
|
|
|
|
1999-12-16 21:34:53 +00:00
|
|
|
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
# (SID of each string is its index in this list.)
cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
	'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
	'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
	'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
	'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
	'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
	'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
	'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
	'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
	's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
	'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
	'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
	'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
	'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
	'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
	'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
	'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
	'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
	'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
	'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
	'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
	'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
	'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
	'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
	'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
	'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
	'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
	'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
	'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
	'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
	'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
	'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
	'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
	'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
	'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
	'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
	'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
	'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
	'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
	'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
	'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
	'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
	'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
	'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
	'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
	'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
	'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
	'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
	'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
	'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
	'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
	'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
	'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
	'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
	'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
	'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
	'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
	'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
	'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
	'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
	'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
	'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
	'Semibold'
]

cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping: string -> SID (standard strings are all unique,
# so a dict comprehension is equivalent to the old index loop)
cffStandardStringMapping = {
	_string: _sid for _sid, _string in enumerate(cffStandardStrings)}
|
2006-10-21 13:41:18 +00:00
|
|
|
|
|
|
|
cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign",
|
|
|
|
"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright",
|
|
|
|
"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two",
|
|
|
|
"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon",
|
|
|
|
"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G",
|
|
|
|
"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
|
|
|
|
"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
|
|
|
|
"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
|
|
|
|
"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
|
|
|
|
"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent",
|
|
|
|
"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle",
|
|
|
|
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",
|
|
|
|
"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet",
|
|
|
|
"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
|
|
|
|
"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
|
|
|
|
"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
|
|
|
|
"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE",
|
|
|
|
"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls",
|
|
|
|
"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus",
|
|
|
|
"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn",
|
|
|
|
"threequarters", "twosuperior", "registered", "minus", "eth", "multiply",
|
|
|
|
"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
|
|
|
|
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave",
|
|
|
|
"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute",
|
|
|
|
"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute",
|
|
|
|
"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute",
|
|
|
|
"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute",
|
|
|
|
"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis",
|
|
|
|
"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde",
|
|
|
|
"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
|
|
|
|
"zcaron"]
|
|
|
|
|
|
|
|
cffISOAdobeStringCount = 229
|
|
|
|
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
|
|
|
|
|
|
|
|
cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall",
|
|
|
|
"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall",
|
|
|
|
"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader",
|
|
|
|
"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
|
|
|
|
"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle",
|
|
|
|
"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon",
|
|
|
|
"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall",
|
|
|
|
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
|
|
|
|
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
|
|
|
|
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
|
|
|
|
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
|
|
|
|
"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall",
|
|
|
|
"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
|
|
|
|
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall",
|
|
|
|
"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall",
|
|
|
|
"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
|
|
|
|
"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
|
|
|
|
"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
|
|
|
|
"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth",
|
|
|
|
"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds",
|
|
|
|
"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior",
|
|
|
|
"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior",
|
|
|
|
"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior",
|
|
|
|
"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior",
|
|
|
|
"centinferior", "dollarinferior", "periodinferior", "commainferior",
|
|
|
|
"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall",
|
|
|
|
"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall",
|
|
|
|
"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall",
|
|
|
|
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
|
|
|
|
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
|
|
|
|
"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
|
|
|
|
"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall",
|
|
|
|
"Ydieresissmall"]
|
|
|
|
|
|
|
|
cffExpertStringCount = 166
|
|
|
|
assert len(cffIExpertStrings) == cffExpertStringCount
|
|
|
|
|
|
|
|
cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle",
|
|
|
|
"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader",
|
|
|
|
"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle",
|
|
|
|
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
|
|
|
|
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon",
|
|
|
|
"semicolon", "commasuperior", "threequartersemdash", "periodsuperior",
|
|
|
|
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
|
|
|
|
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
|
|
|
|
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
|
|
|
|
"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah",
|
|
|
|
"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf",
|
|
|
|
"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths",
|
|
|
|
"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior",
|
|
|
|
"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior",
|
|
|
|
"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
|
|
|
|
"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior",
|
|
|
|
"eightinferior", "nineinferior", "centinferior", "dollarinferior",
|
|
|
|
"periodinferior", "commainferior"]
|
|
|
|
|
|
|
|
cffExpertSubsetStringCount = 87
|
|
|
|
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
|