2013-11-27 17:27:45 -05:00
|
|
|
from fontTools.misc.py23 import *
|
2013-11-27 02:34:11 -05:00
|
|
|
from .DefaultTable import DefaultTable
|
2016-04-07 09:42:13 +01:00
|
|
|
import sys
|
2015-10-17 06:47:52 +02:00
|
|
|
import array
|
2002-05-11 00:59:27 +00:00
|
|
|
import struct
|
2016-01-24 14:43:21 +00:00
|
|
|
import logging
|
|
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
2002-05-11 00:59:27 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class OverflowErrorRecord(object):
    """Plain record describing where an offset overflow happened.

    Built from a 5-tuple of (tableType, LookupListIndex, SubTableIndex,
    itemName, itemIndex); each element becomes an attribute of the same name.
    """

    # Attribute names, in the order the fields appear in the input tuple.
    _FIELDS = ("tableType", "LookupListIndex", "SubTableIndex", "itemName", "itemIndex")

    def __init__(self, overflowTuple):
        for index, field in enumerate(self._FIELDS):
            setattr(self, field, overflowTuple[index])

    def __repr__(self):
        description = (
            self.tableType,
            "LookupIndex:", self.LookupListIndex,
            "SubTableIndex:", self.SubTableIndex,
            "ItemName:", self.itemName,
            "ItemIndex:", self.itemIndex,
        )
        return str(description)
|
|
|
|
|
|
|
|
class OTLOffsetOverflowError(Exception):
    """Raised when an offset inside an OpenType Layout table cannot be
    represented in the available field width.

    The exception carries an OverflowErrorRecord in ``value`` so callers
    can locate and attempt to fix the overflowing (sub)table.
    """

    def __init__(self, overflowErrorRecord):
        # Keep the full record so the compile loop can inspect it.
        self.value = overflowErrorRecord

    def __str__(self):
        return repr(self.value)
|
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
|
|
|
|
class BaseTTXConverter(DefaultTable):

    """Generic base class for TTX table converters. It functions as an
    adapter between the TTX (ttLib actually) table model and the model
    we use for OpenType tables, which is necessarily subtly different.
    """

    def decompile(self, data, font):
        """Decompile binary *data* into an otTables object tree on self.table."""
        from . import otTables
        reader = OTTableReader(data, tableTag=self.tableTag)
        tableClass = getattr(otTables, self.tableTag)
        self.table = tableClass()
        self.table.decompile(reader, font)

    def compile(self, font):
        """Compile the table and return its binary data.

        Create a top-level OTTableWriter for the GPOS/GSUB table.
        Call the compile method for the table
            for each 'converter' record in the table converter list
                call converter's write method for each item in the value.
                    - For simple items, the write method adds a string to the
                      writer's self.items list.
                    - For Struct/Table/Subtable items, it first adds a new
                      writer to the writer's self.items, then calls the item's
                      compile method. This creates a tree of writers, rooted at
                      the GSUB/GPOS writer, with each writer representing a
                      table, and the writer.items list containing the child
                      data strings and writers.
        call the getAllData method
            call _doneWriting, which removes duplicates
            call _gatherTables. This traverses the tables, adding unique
            occurrences to a flat list of tables
            Traverse the flat list of tables, calling getDataLength on each to
            update their position
            Traverse the flat list of tables again, calling getData to get the
            data in the table, now that pos's and offsets are known.

            If a lookup subtable overflows an offset, we have to start all over.
        """
        overflowRecord = None

        while True:
            try:
                writer = OTTableWriter(tableTag=self.tableTag)
                self.table.compile(writer, font)
                return writer.getAllData()

            except OTLOffsetOverflowError as e:
                # If the identical record overflows again, the fixups below
                # made no progress; give up and propagate the error.
                if overflowRecord == e.value:
                    raise # Oh well...

                overflowRecord = e.value
                log.info("Attempting to fix OTLOffsetOverflowError %s", e)

                ok = 0
                if overflowRecord.itemName is None:
                    # Overflow in the top-level Lookup/LookupList structure.
                    from .otTables import fixLookupOverFlows
                    ok = fixLookupOverFlows(font, overflowRecord)
                else:
                    # Overflow inside a specific subtable item.
                    from .otTables import fixSubTableOverFlows
                    ok = fixSubTableOverFlows(font, overflowRecord)
                if not ok:
                    # Try upgrading lookup to Extension and hope
                    # that cross-lookup sharing not happening would
                    # fix overflow...
                    from .otTables import fixLookupOverFlows
                    ok = fixLookupOverFlows(font, overflowRecord)
                    if not ok:
                        raise

    def toXML(self, writer, font):
        """Write the decompiled table to the TTX XML *writer*."""
        self.table.toXML2(writer, font)

    def fromXML(self, name, attrs, content, font):
        """Rebuild self.table from one TTX XML element, then fill defaults."""
        from . import otTables
        if not hasattr(self, "table"):
            tableClass = getattr(otTables, self.tableTag)
            self.table = tableClass()
        self.table.fromXML(name, attrs, content, font)
        self.table.populateDefaults()
|
2002-05-11 00:59:27 +00:00
|
|
|
|
|
|
|
|
2013-11-22 19:23:35 -05:00
|
|
|
class OTTableReader(object):

    """Helper class to retrieve data from an OpenType table."""

    __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag')

    def __init__(self, data, localState=None, offset=0, tableTag=None):
        self.data = data              # the raw table bytes
        self.offset = offset          # base offset of this (sub)table within data
        self.pos = offset             # current read cursor
        self.localState = localState  # propagated values (e.g. counts) for converters
        self.tableTag = tableTag      # tag of the top-level table being read

    def advance(self, count):
        """Move the read cursor forward by *count* bytes."""
        self.pos += count

    def seek(self, pos):
        """Move the read cursor to the absolute position *pos*."""
        self.pos = pos

    def copy(self):
        """Return a new reader over the same data at the same position."""
        clone = self.__class__(self.data, self.localState, self.offset, self.tableTag)
        clone.pos = self.pos
        return clone

    def getSubReader(self, offset):
        """Return a reader for a subtable located *offset* bytes past our base."""
        return self.__class__(
            self.data, self.localState, self.offset + offset, self.tableTag)

    def readValue(self, typecode, staticSize):
        """Read one big-endian value of struct *typecode* and advance."""
        start = self.pos
        end = start + staticSize
        (result,) = struct.unpack(">" + typecode, self.data[start:end])
        self.pos = end
        return result

    def readUShort(self):
        return self.readValue("H", staticSize=2)

    def readArray(self, typecode, staticSize, count):
        """Read *count* big-endian values of *typecode* into an array.array."""
        start = self.pos
        end = start + staticSize * count
        values = array.array(typecode, self.data[start:end])
        # array.array uses native byte order; swap on little-endian hosts.
        if sys.byteorder != "big":
            values.byteswap()
        self.pos = end
        return values

    def readUShortArray(self, count):
        return self.readArray("H", staticSize=2, count=count)

    def readInt8(self):
        return self.readValue("b", staticSize=1)

    def readShort(self):
        return self.readValue("h", staticSize=2)

    def readLong(self):
        return self.readValue("l", staticSize=4)

    def readUInt8(self):
        return self.readValue("B", staticSize=1)

    def readUInt24(self):
        """Read a 24-bit unsigned big-endian value (no native struct code)."""
        start = self.pos
        end = start + 3
        # Prepend a zero byte and decode as a 32-bit value.
        (result,) = struct.unpack(">l", b'\0' + self.data[start:end])
        self.pos = end
        return result

    def readULong(self):
        return self.readValue("L", staticSize=4)

    def readTag(self):
        """Read a four-byte tag and return it as a Tag."""
        start = self.pos
        end = start + 4
        tag = Tag(self.data[start:end])
        assert len(tag) == 4, tag
        self.pos = end
        return tag

    def readData(self, count):
        """Read *count* raw bytes and advance."""
        start = self.pos
        end = start + count
        chunk = self.data[start:end]
        self.pos = end
        return chunk

    def __setitem__(self, name, value):
        # Copy-on-write: never mutate a shared localState dict.
        newState = dict(self.localState) if self.localState else {}
        newState[name] = value
        self.localState = newState

    def __getitem__(self, name):
        return self.localState and self.localState[name]

    def __contains__(self, name):
        return self.localState and name in self.localState
|
2013-11-24 16:59:42 -05:00
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
|
2013-11-22 19:23:35 -05:00
|
|
|
class OTTableWriter(object):

    """Helper class to gather and assemble data for OpenType tables."""

    def __init__(self, localState=None, tableTag=None, offsetSize=2):
        # items holds a mix of: data strings, child OTTableWriter objects
        # (offsets to subtables), and CountReference objects.
        self.items = []
        # Absolute position of this table in the final data; set by getAllData.
        self.pos = None
        self.localState = localState
        self.tableTag = tableTag
        # Width in bytes of the offset pointing at this table (2, 3 or 4).
        self.offsetSize = offsetSize
        # First writer that referenced us; see getSubWriter.
        self.parent = None

    # DEPRECATED: 'longOffset' is kept as a property for backward compat with old code.
    # You should use 'offsetSize' instead (2, 3 or 4 bytes).
    @property
    def longOffset(self):
        return self.offsetSize == 4

    @longOffset.setter
    def longOffset(self, value):
        self.offsetSize = 4 if value else 2

    def __setitem__(self, name, value):
        # Copy-on-write so sibling writers sharing localState are unaffected.
        state = self.localState.copy() if self.localState else dict()
        state[name] = value
        self.localState = state

    def __getitem__(self, name):
        return self.localState[name]

    def __delitem__(self, name):
        del self.localState[name]

    # assembler interface

    def getDataLength(self):
        """Return the length of this table in bytes, without subtables."""
        l = 0
        for item in self.items:
            if hasattr(item, "getCountData"):
                # CountReference: occupies its declared size.
                l += item.size
            elif hasattr(item, "getData"):
                # Subtable writer: occupies the width of its offset field.
                l += item.offsetSize
            else:
                # Plain data string.
                l = l + len(item)
        return l

    def getData(self):
        """Assemble the data for this writer/table, without subtables.

        Subtable-writer items are replaced by packed offsets relative to this
        table's position. Raises OTLOffsetOverflowError if a 16-bit offset
        does not fit.
        """
        items = list(self.items)  # make a shallow copy
        pos = self.pos
        numItems = len(items)
        for i in range(numItems):
            item = items[i]

            if hasattr(item, "getData"):
                if item.offsetSize == 4:
                    items[i] = packULong(item.pos - pos)
                elif item.offsetSize == 2:
                    try:
                        items[i] = packUShort(item.pos - pos)
                    except struct.error:
                        # provide data to fix overflow problem.
                        overflowErrorRecord = self.getOverflowErrorRecord(item)

                        raise OTLOffsetOverflowError(overflowErrorRecord)
                elif item.offsetSize == 3:
                    items[i] = packUInt24(item.pos - pos)
                else:
                    raise ValueError(item.offsetSize)

        return bytesjoin(items)

    def __hash__(self):
        # only works after self._doneWriting() has been called
        # (_doneWriting converts self.items to a hashable tuple)
        return hash(self.items)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.offsetSize == other.offsetSize and self.items == other.items

    def _doneWriting(self, internedTables):
        # Convert CountData references to data string items
        # collapse duplicate table references to a unique entry
        # "tables" are OTTableWriter objects.

        # For Extension Lookup types, we can
        # eliminate duplicates only within the tree under the Extension Lookup,
        # as offsets may exceed 64K even between Extension LookupTable subtables.
        isExtension = hasattr(self, "Extension")

        # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
        # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
        # empty, array.  So, we don't share those.
        # See: https://github.com/fonttools/fonttools/issues/518
        dontShare = hasattr(self, 'DontShare')

        if isExtension:
            # Restart interning: sharing must not cross the Extension boundary.
            internedTables = {}

        items = self.items
        for i in range(len(items)):
            item = items[i]
            if hasattr(item, "getCountData"):
                items[i] = item.getCountData()
            elif hasattr(item, "getData"):
                item._doneWriting(internedTables)
                if not dontShare:
                    # Replace with the canonical (first-seen) equal writer.
                    items[i] = item = internedTables.setdefault(item, item)
        # Tuple makes self hashable for interning by our own parent.
        self.items = tuple(items)

    def _gatherTables(self, tables, extTables, done):
        # Convert table references in self.items tree to a flat
        # list of tables in depth-first traversal order.
        # "tables" are OTTableWriter objects.
        # We do the traversal in reverse order at each level, in order to
        # resolve duplicate references to be the last reference in the list of tables.
        # For extension lookups, duplicate references can be merged only within the
        # writer tree under the extension lookup.

        done[id(self)] = True

        numItems = len(self.items)
        iRange = list(range(numItems))
        iRange.reverse()

        isExtension = hasattr(self, "Extension")

        selfTables = tables

        if isExtension:
            assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
            # Subtree below an Extension goes into extTables with a fresh
            # 'done' map, since it cannot share with tables outside it.
            tables, extTables, done = extTables, None, {}

        # add Coverage table if it is sorted last.
        sortCoverageLast = 0
        if hasattr(self, "sortCoverageLast"):
            # Find coverage table
            for i in range(numItems):
                item = self.items[i]
                if hasattr(item, "name") and (item.name == "Coverage"):
                    sortCoverageLast = 1
                    break
            if id(item) not in done:
                item._gatherTables(tables, extTables, done)
            else:
                # We're a new parent of item
                pass

        for i in iRange:
            item = self.items[i]
            if not hasattr(item, "getData"):
                continue

            if sortCoverageLast and (i==1) and item.name == 'Coverage':
                # we've already 'gathered' it above
                continue

            if id(item) not in done:
                item._gatherTables(tables, extTables, done)
            else:
                # Item is already written out by other parent
                pass

        selfTables.append(self)

    def getAllData(self):
        """Assemble all data, including all subtables."""
        internedTables = {}
        self._doneWriting(internedTables)
        tables = []
        extTables = []
        done = {}
        self._gatherTables(tables, extTables, done)
        tables.reverse()
        extTables.reverse()
        # Gather all data in two passes: the absolute positions of all
        # subtable are needed before the actual data can be assembled.
        pos = 0
        for table in tables:
            table.pos = pos
            pos = pos + table.getDataLength()

        for table in extTables:
            table.pos = pos
            pos = pos + table.getDataLength()

        data = []
        for table in tables:
            tableData = table.getData()
            data.append(tableData)

        for table in extTables:
            tableData = table.getData()
            data.append(tableData)

        return bytesjoin(data)

    # interface for gathering data, as used by table.compile()

    def getSubWriter(self, offsetSize=2):
        """Return a child writer sharing our localState and tableTag."""
        subwriter = self.__class__(self.localState, self.tableTag, offsetSize=offsetSize)
        subwriter.parent = self # because some subtables have idential values, we discard
                    # the duplicates under the getAllData method. Hence some
                    # subtable writers can have more than one parent writer.
                    # But we just care about first one right now.
        return subwriter

    def writeValue(self, typecode, value):
        """Append one big-endian value packed with struct *typecode*."""
        self.items.append(struct.pack(f">{typecode}", value))

    def writeUShort(self, value):
        assert 0 <= value < 0x10000, value
        self.items.append(struct.pack(">H", value))

    def writeShort(self, value):
        assert -32768 <= value < 32768, value
        self.items.append(struct.pack(">h", value))

    def writeUInt8(self, value):
        assert 0 <= value < 256, value
        self.items.append(struct.pack(">B", value))

    def writeInt8(self, value):
        assert -128 <= value < 128, value
        self.items.append(struct.pack(">b", value))

    def writeUInt24(self, value):
        assert 0 <= value < 0x1000000, value
        # Pack as 32-bit and drop the high byte (no native 24-bit code).
        b = struct.pack(">L", value)
        self.items.append(b[1:])

    def writeLong(self, value):
        self.items.append(struct.pack(">l", value))

    def writeULong(self, value):
        self.items.append(struct.pack(">L", value))

    def writeTag(self, tag):
        """Append a four-byte tag."""
        tag = Tag(tag).tobytes()
        assert len(tag) == 4, tag
        self.items.append(tag)

    def writeSubTable(self, subWriter):
        """Append a subtable writer; becomes an offset in getData()."""
        self.items.append(subWriter)

    def writeCountReference(self, table, name, size=2, value=None):
        """Append a placeholder for a count that may be filled in later."""
        ref = CountReference(table, name, size=size, value=value)
        self.items.append(ref)
        return ref

    def writeStruct(self, format, values):
        data = struct.pack(*(format,) + values)
        self.items.append(data)

    def writeData(self, data):
        """Append raw bytes verbatim."""
        self.items.append(data)

    def getOverflowErrorRecord(self, item):
        """Build an OverflowErrorRecord locating *item* in the Lookup tree.

        Walks up the writer parent chain to find the enclosing SubTable /
        ExtSubTable and its Lookup indices, so the overflow fixers know what
        to promote or split.
        """
        LookupListIndex = SubTableIndex = itemName = itemIndex = None
        if self.name == 'LookupList':
            LookupListIndex = item.repeatIndex
        elif self.name == 'Lookup':
            LookupListIndex = self.repeatIndex
            SubTableIndex = item.repeatIndex
        else:
            itemName = getattr(item, 'name', '<none>')
            if hasattr(item, 'repeatIndex'):
                itemIndex = item.repeatIndex
            if self.name == 'SubTable':
                LookupListIndex = self.parent.repeatIndex
                SubTableIndex = self.repeatIndex
            elif self.name == 'ExtSubTable':
                LookupListIndex = self.parent.parent.repeatIndex
                SubTableIndex = self.parent.repeatIndex
            else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
                itemName = ".".join([self.name, itemName])
                p1 = self.parent
                while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
                    itemName = ".".join([p1.name, itemName])
                    p1 = p1.parent
                if p1:
                    if p1.name == 'ExtSubTable':
                        LookupListIndex = p1.parent.parent.repeatIndex
                        SubTableIndex = p1.parent.repeatIndex
                    else:
                        LookupListIndex = p1.parent.repeatIndex
                        SubTableIndex = p1.repeatIndex

        return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) )
|
2006-10-21 14:12:38 +00:00
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class CountReference(object):
    """A reference to a Count value, not a count of references."""

    def __init__(self, table, name, size=None, value=None):
        self.table = table  # dict-like holder of the count field
        self.name = name    # key of the count field within table
        self.size = size    # byte width used when packing (1, 2 or 4)
        if value is not None:
            self.setValue(value)

    def setValue(self, value):
        """Set the count, or verify it if it was already set."""
        current = self.table[self.name]
        if current is None:
            self.table[self.name] = value
        else:
            assert current == value, (self.name, current, value)

    def getValue(self):
        return self.table[self.name]

    def getCountData(self):
        """Pack the count (treating unset as 0) at the declared size."""
        count = self.table[self.name]
        if count is None:
            count = 0
        packer = {1: packUInt8, 2: packUShort, 4: packULong}[self.size]
        return packer(count)
|
2002-05-11 00:59:27 +00:00
|
|
|
|
|
|
|
|
2017-07-10 12:16:42 +01:00
|
|
|
def packUInt8 (value):
    """Pack *value* as one big-endian unsigned byte."""
    packed = struct.pack(">B", value)
    return packed
|
|
|
|
|
2002-05-11 10:21:36 +00:00
|
|
|
def packUShort(value):
    """Pack *value* as a big-endian unsigned 16-bit integer.

    Deliberately has no range assert: struct.error raised on overflow is
    caught by OTTableWriter.getData to detect offset overflows.
    """
    return struct.pack(">H", value)
|
2002-05-11 00:59:27 +00:00
|
|
|
|
2006-10-21 14:12:38 +00:00
|
|
|
def packULong(value):
    """Pack *value* as a big-endian unsigned 32-bit integer."""
    assert 0 <= value < 0x100000000, value
    packed = struct.pack(">L", value)
    return packed
|
|
|
|
|
2020-10-06 16:30:48 +01:00
|
|
|
def packUInt24(value):
    """Pack *value* as a big-endian unsigned 24-bit integer.

    Packs as 32 bits and drops the high byte, since struct has no
    native 24-bit format code.
    """
    assert 0 <= value < 0x1000000, value
    full = struct.pack(">L", value)
    return full[1:]
|
|
|
|
|
2006-10-21 14:12:38 +00:00
|
|
|
|
2013-11-19 17:20:54 -05:00
|
|
|
class BaseTable(object):
|
2013-12-17 00:44:33 -05:00
|
|
|
|
2015-07-01 21:59:37 -07:00
|
|
|
"""Generic base class for all OpenType (sub)tables."""
|
|
|
|
|
2006-10-21 14:12:38 +00:00
|
|
|
    def __getattr__(self, attr):
        # Lazy decompilation: if a pending reader is stashed on the instance,
        # decompile now and retry the attribute lookup on the filled-in table.
        reader = self.__dict__.get("reader")
        if reader:
            # Remove reader/font first so decompile (and any re-entrant
            # attribute access) doesn't trigger this path again.
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)
            return getattr(self, attr)

        # No pending reader: the attribute genuinely does not exist.
        raise AttributeError(attr)
|
2006-10-21 14:12:38 +00:00
|
|
|
|
2015-07-01 21:59:37 -07:00
|
|
|
    def ensureDecompiled(self):
        """Force lazy decompilation now, if it is still pending.

        Same mechanism as __getattr__, but callable explicitly (e.g. before
        compiling, so all attributes exist).
        """
        reader = self.__dict__.get("reader")
        if reader:
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2015-07-02 18:00:41 -07:00
|
|
|
    @classmethod
    def getRecordSize(cls, reader):
        """Return the fixed byte size of one record of this table type.

        Returns NotImplemented when the size cannot be determined statically
        (a converter has no static size, or a repeat count is not available
        in the reader's propagated state).
        """
        totalSize = 0
        for conv in cls.converters:
            size = conv.getRecordSize(reader)
            if size is NotImplemented: return NotImplemented
            countValue = 1
            if conv.repeat:
                if conv.repeat in reader:
                    # Repeat count was propagated via the reader's localState.
                    countValue = reader[conv.repeat]
                else:
                    return NotImplemented
            totalSize += size * countValue
        return totalSize
|
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
    def getConverters(self):
        """Return the converter list describing this table's fields, in order."""
        return self.converters
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
    def getConverterByName(self, name):
        """Return the converter for the field called *name* (KeyError if absent)."""
        return self.convertersByName[name]
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2016-12-31 23:16:12 -05:00
|
|
|
    def populateDefaults(self, propagator=None):
        """Fill in default values for attributes missing after fromXML.

        Repeated fields default to empty lists and their counts are synced;
        nullable offsets default to None. *propagator* (optional, dict-like of
        CountReference) receives counts whose field lives in an outer table.
        """
        for conv in self.getConverters():
            if conv.repeat:
                if not hasattr(self, conv.name):
                    setattr(self, conv.name, [])
                countValue = len(getattr(self, conv.name)) - conv.aux
                try:
                    # NOTE: the result is unused; the lookup exists for its
                    # KeyError side effect, which tells us whether the count
                    # field belongs to this table or is propagated from outside.
                    count_conv = self.getConverterByName(conv.repeat)
                    setattr(self, conv.repeat, countValue)
                except KeyError:
                    # conv.repeat is a propagated count
                    if propagator and conv.repeat in propagator:
                        propagator[conv.repeat].setValue(countValue)
            else:
                # conv.aux is a Python expression gating the field's presence.
                if conv.aux and not eval(conv.aux, None, self.__dict__):
                    continue
                if hasattr(self, conv.name):
                    continue # Warn if it should NOT be present?!
                if hasattr(conv, 'writeNullOffset'):
                    setattr(self, conv.name, None) # Warn?
                #elif not conv.isCount:
                #	# Warn?
                #	pass
|
|
|
|
|
2013-11-24 17:08:06 -05:00
|
|
|
    def decompile(self, reader, font):
        """Read this table's fields from *reader*, converter by converter.

        Values are accumulated into a temporary dict and committed to the
        instance at the end (via postRead when the class defines one).
        """
        self.readFormat(reader)
        table = {}
        self.__rawTable = table  # for debugging
        for conv in self.getConverters():
            # Polymorphic fields: resolve the concrete converter from values
            # read earlier in this same table (or from the reader's state).
            if conv.name == "SubTable":
                conv = conv.getConverter(reader.tableTag,
                        table["LookupType"])
            if conv.name == "ExtSubTable":
                conv = conv.getConverter(reader.tableTag,
                        table["ExtensionLookupType"])
            if conv.name == "FeatureParams":
                conv = conv.getConverter(reader["FeatureTag"])
            if conv.name == "SubStruct":
                conv = conv.getConverter(reader.tableTag,
                        table["MorphType"])
            try:
                if conv.repeat:
                    # Repeat count may be literal, a field of this table,
                    # or a count propagated through the reader.
                    if isinstance(conv.repeat, int):
                        countValue = conv.repeat
                    elif conv.repeat in table:
                        countValue = table[conv.repeat]
                    else:
                        # conv.repeat is a propagated count
                        countValue = reader[conv.repeat]
                    countValue += conv.aux
                    table[conv.name] = conv.readArray(reader, font, table, countValue)
                else:
                    # conv.aux is a Python expression gating the field's presence.
                    if conv.aux and not eval(conv.aux, None, table):
                        continue
                    table[conv.name] = conv.read(reader, font, table)
                    if conv.isPropagated:
                        reader[conv.name] = table[conv.name]
            except Exception as e:
                # Tag the exception with the failing field name for context.
                name = conv.name
                e.args = e.args + (name,)
                raise

        if hasattr(self, 'postRead'):
            self.postRead(table, font)
        else:
            self.__dict__.update(table)

        del self.__rawTable # succeeded, get rid of debugging info
|
2006-10-21 14:12:38 +00:00
|
|
|
|
2013-11-24 17:08:06 -05:00
|
|
|
def compile(self, writer, font):
    """Compile this table into *writer*, converter by converter.

    The table's attributes are first flattened into a plain dict (either
    via a custom preWrite() hook or a copy of __dict__); then every
    converter writes its value into the writer.  Count fields are written
    as CountReference placeholders that are filled in once the arrays
    they describe have actually been written.
    """
    self.ensureDecompiled()
    # TODO Following hack to be removed by rewriting how FormatSwitching tables
    # are handled.
    # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
    if hasattr(self, 'preWrite'):
        # Track whether preWrite() itself introduced a Format attribute,
        # so it can be deleted again once compilation is done.
        deleteFormat = not hasattr(self, 'Format')
        table = self.preWrite(font)
        deleteFormat = deleteFormat and hasattr(self, 'Format')
    else:
        deleteFormat = False
        table = self.__dict__.copy()

    # some count references may have been initialized in a custom preWrite; we set
    # these in the writer's state beforehand (instead of sequentially) so they will
    # be propagated to all nested subtables even if the count appears in the current
    # table only *after* the offset to the subtable that it is counting.
    for conv in self.getConverters():
        if conv.isCount and conv.isPropagated:
            value = table.get(conv.name)
            if isinstance(value, CountReference):
                writer[conv.name] = value

    if hasattr(self, 'sortCoverageLast'):
        writer.sortCoverageLast = 1

    if hasattr(self, 'DontShare'):
        writer.DontShare = True

    if hasattr(self.__class__, 'LookupType'):
        writer['LookupType'].setValue(self.__class__.LookupType)

    self.writeFormat(writer)
    for conv in self.getConverters():
        value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None!
        if conv.repeat:
            if value is None:
                value = []
            countValue = len(value) - conv.aux
            if isinstance(conv.repeat, int):
                # Fixed-length array: the count is implied by the format itself.
                assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value))
            elif conv.repeat in table:
                # Local count field: patch the count into the table dict in place.
                CountReference(table, conv.repeat, value=countValue)
            else:
                # conv.repeat is a propagated count
                writer[conv.repeat].setValue(countValue)
            values = value
            for i, value in enumerate(values):
                try:
                    conv.write(writer, font, table, value, i)
                except Exception as e:
                    # Annotate the exception with the offending item so the
                    # final traceback shows the object hierarchy.
                    name = value.__class__.__name__ if value is not None else conv.name
                    e.args = e.args + (name+'['+str(i)+']',)
                    raise
        elif conv.isCount:
            # Special-case Count values.
            # Assumption: a Count field will *always* precede
            # the actual array(s).
            # We need a default value, as it may be set later by a nested
            # table. We will later store it here.
            # We add a reference: by the time the data is assembled
            # the Count value will be filled in.
            # We ignore the current count value since it will be recomputed,
            # unless it's a CountReference that was already initialized in a custom preWrite.
            if isinstance(value, CountReference):
                ref = value
                ref.size = conv.staticSize
                writer.writeData(ref)
                table[conv.name] = ref.getValue()
            else:
                ref = writer.writeCountReference(table, conv.name, conv.staticSize)
                table[conv.name] = None
            if conv.isPropagated:
                writer[conv.name] = ref
        elif conv.isLookupType:
            # We make sure that subtables have the same lookup type,
            # and that the type is the same as the one set on the
            # Lookup object, if any is set.
            if conv.name not in table:
                table[conv.name] = None
            ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name])
            writer['LookupType'] = ref
        else:
            if conv.aux and not eval(conv.aux, None, table):
                continue
            try:
                conv.write(writer, font, table, value)
            except Exception as e:
                # Same traceback-annotation hack as in the repeat branch above.
                name = value.__class__.__name__ if value is not None else conv.name
                e.args = e.args + (name,)
                raise
            if conv.isPropagated:
                writer[conv.name] = value

    if deleteFormat:
        del self.Format
2002-09-10 19:26:38 +00:00
|
|
|
def readFormat(self, reader):
    """Hook for reading a leading format field; plain tables have none."""
    return None
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-09-10 19:26:38 +00:00
|
|
|
def writeFormat(self, writer):
    """Hook for writing a leading format field; plain tables have none."""
    return None
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2013-12-09 00:39:25 -05:00
|
|
|
def toXML(self, xmlWriter, font, attrs=None, name=None):
    """Emit this table as a single XML element; the body comes from toXML2()."""
    tag = name or self.__class__.__name__
    attributes = [] if attrs is None else attrs
    if hasattr(self, "Format"):
        # Concatenate rather than mutate, so the caller's list is untouched.
        attributes = attributes + [("Format", self.Format)]
    xmlWriter.begintag(tag, attributes)
    xmlWriter.newline()
    self.toXML2(xmlWriter, font)
    xmlWriter.endtag(tag)
    xmlWriter.newline()
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
def toXML2(self, xmlWriter, font):
    # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB):
    # in TTX the parent writes our main tag, while in otBase.py we write it ourselves.
    for conv in self.getConverters():
        if conv.repeat:
            for index, item in enumerate(getattr(self, conv.name, [])):
                conv.xmlWrite(xmlWriter, font, item, conv.name,
                              [("index", index)])
        else:
            if conv.aux and not eval(conv.aux, None, vars(self)):
                continue
            value = getattr(self, conv.name, None)  # TODO Handle defaults instead of defaulting to None!
            conv.xmlWrite(xmlWriter, font, value, conv.name, [])
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2013-11-27 03:19:32 -05:00
|
|
|
def fromXML(self, name, attrs, content, font):
    """Set one attribute from a parsed XML element, appending for arrays."""
    try:
        conv = self.getConverterByName(name)
    except KeyError:
        raise  # XXX on KeyError, raise nice error
    value = conv.xmlRead(attrs, content, font)
    if not conv.repeat:
        setattr(self, conv.name, value)
        return
    # Repeated element: accumulate into a list, creating it on first use.
    seq = getattr(self, conv.name, None)
    if seq is None:
        seq = []
        setattr(self, conv.name, seq)
    seq.append(value)
|
2015-04-26 02:01:01 -04:00
|
|
|
|
2013-12-06 22:25:48 -05:00
|
|
|
def __ne__(self, other):
    """Inverse of __eq__, propagating NotImplemented untouched."""
    eq = self.__eq__(other)
    if eq is NotImplemented:
        return eq
    return not eq
|
|
|
|
|
2013-11-27 18:58:45 -05:00
|
|
|
def __eq__(self, other):
    """Tables of the same type compare equal when their decompiled state matches."""
    if type(other) is not type(self):
        return NotImplemented
    # Force lazy tables to materialize so the __dict__ comparison is fair.
    self.ensureDecompiled()
    other.ensureDecompiled()
    return self.__dict__ == other.__dict__
|
2002-05-11 00:59:27 +00:00
|
|
|
|
|
|
|
|
|
|
|
class FormatSwitchingBaseTable(BaseTable):

    """Minor specialization of BaseTable, for tables that have multiple
    formats, eg. CoverageFormat1 vs. CoverageFormat2."""

    @classmethod
    def getRecordSize(cls, reader):
        # The record size depends on the Format, which is unknown here.
        return NotImplemented

    def getConverters(self):
        # An unrecognized Format yields no converters rather than a crash.
        return self.converters.get(self.Format, [])

    def getConverterByName(self, name):
        converters = self.convertersByName[self.Format]
        return converters[name]

    def readFormat(self, reader):
        # Every format-switching table starts with a uint16 Format field.
        self.Format = reader.readUShort()

    def writeFormat(self, writer):
        writer.writeUShort(self.Format)

    def toXML(self, xmlWriter, font, attrs=None, name=None):
        BaseTable.toXML(self, xmlWriter, font, attrs, name)
2002-05-11 00:59:27 +00:00
|
|
|
|
2020-10-07 18:45:56 +01:00
|
|
|
class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable):

    """Format-switching table whose Format field is one byte wide."""

    def readFormat(self, reader):
        self.Format = reader.readUInt8()

    def writeFormat(self, writer):
        writer.writeUInt8(self.Format)
|
|
|
|
|
|
|
|
|
|
|
|
# Maps the width of the Format field to the matching base class.
formatSwitchingBaseTables = {
    "uint16": FormatSwitchingBaseTable,
    "uint8": UInt8FormatSwitchingBaseTable,
}


def getFormatSwitchingBaseTableClass(formatType):
    """Return the format-switching base class for *formatType*.

    Raises TypeError for an unrecognized format type name.
    """
    try:
        return formatSwitchingBaseTables[formatType]
    except KeyError:
        raise TypeError(f"Unsupported format type: {formatType!r}")
|
|
|
|
|
|
|
|
|
2002-05-11 10:21:36 +00:00
|
|
|
#
|
|
|
|
# Support for ValueRecords
|
|
|
|
#
|
|
|
|
# This data type is so different from all other OpenType data types that
|
|
|
|
# it requires quite a bit of code for itself. It even has special support
|
|
|
|
# in OTTableReader and OTTableWriter...
|
|
|
|
#
|
|
|
|
|
2002-05-11 00:59:27 +00:00
|
|
|
valueRecordFormat = [
|
2015-04-26 00:54:30 -04:00
|
|
|
# Mask Name isDevice signed
|
|
|
|
(0x0001, "XPlacement", 0, 1),
|
|
|
|
(0x0002, "YPlacement", 0, 1),
|
|
|
|
(0x0004, "XAdvance", 0, 1),
|
|
|
|
(0x0008, "YAdvance", 0, 1),
|
|
|
|
(0x0010, "XPlaDevice", 1, 0),
|
|
|
|
(0x0020, "YPlaDevice", 1, 0),
|
|
|
|
(0x0040, "XAdvDevice", 1, 0),
|
|
|
|
(0x0080, "YAdvDevice", 1, 0),
|
|
|
|
# reserved:
|
|
|
|
(0x0100, "Reserved1", 0, 0),
|
|
|
|
(0x0200, "Reserved2", 0, 0),
|
|
|
|
(0x0400, "Reserved3", 0, 0),
|
|
|
|
(0x0800, "Reserved4", 0, 0),
|
|
|
|
(0x1000, "Reserved5", 0, 0),
|
|
|
|
(0x2000, "Reserved6", 0, 0),
|
|
|
|
(0x4000, "Reserved7", 0, 0),
|
|
|
|
(0x8000, "Reserved8", 0, 0),
|
2002-05-11 00:59:27 +00:00
|
|
|
]
|
|
|
|
|
|
|
|
def _buildDict():
|
|
|
|
d = {}
|
|
|
|
for mask, name, isDevice, signed in valueRecordFormat:
|
|
|
|
d[name] = mask, isDevice, signed
|
|
|
|
return d
|
|
|
|
|
|
|
|
valueRecordFormatDict = _buildDict()
|
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class ValueRecordFactory(object):

    """Converts ValueRecords to and from binary, for one value-format bitmask."""

    def __init__(self, valueFormat):
        # Keep only the fields selected by the bitmask, in binary order.
        self.format = [
            (name, isDevice, signed)
            for mask, name, isDevice, signed in valueRecordFormat
            if valueFormat & mask
        ]

    def __len__(self):
        return len(self.format)

    def readValueRecord(self, reader, font):
        """Read one ValueRecord from *reader*; None for an empty format."""
        if not self.format:
            return None
        valueRecord = ValueRecord()
        for fieldName, isDevice, signed in self.format:
            fieldValue = reader.readShort() if signed else reader.readUShort()
            if isDevice:
                if fieldValue:
                    # A non-zero device value is an offset to a device table.
                    from . import otTables
                    subReader = reader.getSubReader(fieldValue)
                    fieldValue = getattr(otTables, fieldName)()
                    fieldValue.decompile(subReader, font)
                else:
                    fieldValue = None
            setattr(valueRecord, fieldName, fieldValue)
        return valueRecord

    def writeValueRecord(self, writer, font, valueRecord):
        """Write *valueRecord* to *writer*, field by field in binary order."""
        for fieldName, isDevice, signed in self.format:
            fieldValue = getattr(valueRecord, fieldName, 0)
            if isDevice:
                if fieldValue:
                    subWriter = writer.getSubWriter()
                    writer.writeSubTable(subWriter)
                    fieldValue.compile(subWriter, font)
                else:
                    # Absent device table: write a NULL offset.
                    writer.writeUShort(0)
            elif signed:
                writer.writeShort(fieldValue)
            else:
                writer.writeUShort(fieldValue)
|
|
|
|
|
|
|
|
|
2013-11-28 14:26:58 -05:00
|
|
|
class ValueRecord(object):

    # Attribute layout is driven by ValueRecordFactory: only the fields
    # selected by the value format are ever present on an instance.

    def __init__(self, valueFormat=None, src=None):
        if valueFormat is not None:
            # Pre-populate exactly the fields the format mask selects.
            for mask, name, isDevice, signed in valueRecordFormat:
                if valueFormat & mask:
                    setattr(self, name, None if isDevice else 0)
            if src is not None:
                # Copy from *src*, but only fields valid under this format.
                for fieldName, fieldValue in src.__dict__.items():
                    if not hasattr(self, fieldName):
                        continue
                    setattr(self, fieldName, fieldValue)
        elif src is not None:
            self.__dict__ = src.__dict__.copy()

    def getFormat(self):
        """Derive the value-format bitmask from the fields present."""
        mask = 0
        for fieldName in self.__dict__:
            mask |= valueRecordFormatDict[fieldName][0]
        return mask

    def toXML(self, xmlWriter, font, valueName, attrs=None):
        simpleItems = [] if attrs is None else list(attrs)
        for mask, name, isDevice, signed in valueRecordFormat[:4]:  # "simple" values
            if hasattr(self, name):
                simpleItems.append((name, getattr(self, name)))
        deviceItems = []
        for mask, name, isDevice, signed in valueRecordFormat[4:8]:  # device records
            if hasattr(self, name):
                device = getattr(self, name)
                if device is not None:
                    deviceItems.append((name, device))
        if not deviceItems:
            # Simple values only: a single self-closing element suffices.
            xmlWriter.simpletag(valueName, simpleItems)
            xmlWriter.newline()
            return
        xmlWriter.begintag(valueName, simpleItems)
        xmlWriter.newline()
        for name, deviceRecord in deviceItems:
            if deviceRecord is not None:
                deviceRecord.toXML(xmlWriter, font, name=name)
        xmlWriter.endtag(valueName)
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        from . import otTables
        # Simple values arrive as integer attributes on the element itself.
        for attrName, attrValue in attrs.items():
            setattr(self, attrName, int(attrValue))
        # Device tables arrive as nested child elements.
        for element in content:
            if not isinstance(element, tuple):
                continue  # skip whitespace between elements
            name, attrs, content = element
            value = getattr(otTables, name)()
            for childElement in content:
                if not isinstance(childElement, tuple):
                    continue
                childName, childAttrs, childContent = childElement
                value.fromXML(childName, childAttrs, childContent, font)
            setattr(self, name, value)

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return self.__dict__ == other.__dict__
|