# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

"""Font merger."""

from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.timeTools import timestampNow
from fontTools import ttLib, cffLib
from fontTools.ttLib.tables import otTables, _h_e_a_d
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.misc.loggingTools import Timer
from fontTools.pens.recordingPen import DecomposingRecordingPen
from fontTools.subset import _DesubroutinizingT2Decompiler
from functools import reduce
import sys
import time
import operator
import logging
import os

log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
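# Typical use (illustrative sketch, not part of the original module): merge a
# set of font files into one TTFont, either from Python or via the pyftmerge
# entry point defined in main() below.  The file names are placeholders.
#
#     from fontTools.merge import Merger, Options
#
#     merger = Merger(options=Options(verbose=True))
#     merged = merger.merge(["a.ttf", "b.ttf"])
#     merged.save("merged.ttf")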
def _add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    allowDefault = kwargs.get('allowDefaultTable', False)
    def wrapper(method):
        done = []
        for clazz in clazzes:
            if clazz in done: continue # Support multiple names of a clazz
            done.append(clazz)
            assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
            assert method.__name__ not in clazz.__dict__, \
                "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        return None
    return wrapper
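# Illustrative sketch (assumption, mirroring the decorators used throughout
# this module): _add_method attaches the decorated function as a method of the
# given table classes, so per-table merge logic can live next to the tables.
#
#     @_add_method(ttLib.getTableClass('kern'))   # 'kern' is a hypothetical target
#     def merge(self, m, tables):
#         return NotImplemented   # i.e. drop the table from the merged font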
# General utility functions for merging values from different fonts

def equal(lst):
    lst = list(lst)
    t = iter(lst)
    first = next(t)
    assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
    return first

def first(lst):
    return next(iter(lst))

def recalculate(lst):
    return NotImplemented

def current_time(lst):
    return timestampNow()

def bitwise_and(lst):
    return reduce(operator.and_, lst)

def bitwise_or(lst):
    return reduce(operator.or_, lst)

def avg_int(lst):
    lst = list(lst)
    return sum(lst) // len(lst)
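# Illustrative sketch (not part of the original module): each helper above is a
# mergeMap "value merger" -- it receives the per-font values of one table
# attribute and returns the merged value (or NotImplemented to skip it).
#
#     equal(['glyf', 'glyf'])       # -> 'glyf'
#     bitwise_or([0b0101, 0b0011])  # -> 0b0111
#     avg_int([500, 600])           # -> 550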
def onlyExisting(func):
    """Returns a filter func that when called with a list,
    only calls func on the non-NotImplemented items of the list,
    and only so if there's at least one item remaining.
    Otherwise returns NotImplemented."""

    def wrapper(lst):
        items = [item for item in lst if item is not NotImplemented]
        return func(items) if items else NotImplemented

    return wrapper
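# Illustrative sketch (not part of the original module): onlyExisting is used
# for attributes that only some input fonts carry, e.g. the OS/2 code-page
# ranges that only newer table versions define.
#
#     merger = onlyExisting(bitwise_or)
#     merger([NotImplemented, 0b0101, 0b0011])   # -> 0b0111
#     merger([NotImplemented, NotImplemented])   # -> NotImplemented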
def sumLists(lst):
    l = []
    for item in lst:
        l.extend(item)
    return l

def sumDicts(lst):
    d = {}
    for item in lst:
        d.update(item)
    return d

def mergeObjects(lst):
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None

    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst

    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}

    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                mergeLogic = logic['*']
            except KeyError:
                raise Exception("Don't know how to merge key %s of class %s" %
                        (key, clazz.__name__))
        if mergeLogic is NotImplemented:
            continue
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value

    returnTable.__dict__ = returnDict

    return returnTable

def mergeBits(bitmap):

    def wrapper(lst):
        lst = list(lst)
        returnValue = 0
        for bitNumber in range(bitmap['size']):
            try:
                mergeLogic = bitmap[bitNumber]
            except KeyError:
                try:
                    mergeLogic = bitmap['*']
                except KeyError:
                    raise Exception("Don't know how to merge bit %s" % bitNumber)
            shiftedBit = 1 << bitNumber
            mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
            returnValue |= mergedValue << bitNumber
        return returnValue

    return wrapper
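# Illustrative sketch (not part of the original module): mergeBits builds a
# per-bit merger from a bitmap spec such as headFlagsMergeBitMap below -- each
# listed bit gets its own merge function, every other bit uses the '*' entry.
#
#     mergeFlags = mergeBits({'size': 4, '*': bitwise_or, 1: bitwise_and})
#     mergeFlags([0b0011, 0b0101])   # -> 0b0101 (bit 1 AND-ed to 0, the rest OR-ed)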
@_add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    if not hasattr(self, 'mergeMap'):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented

    logic = self.mergeMap

    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)

ttLib.getTableClass('maxp').mergeMap = {
    '*': max,
    'tableTag': equal,
    'tableVersion': equal,
    'numGlyphs': sum,
    'maxStorage': first,
    'maxFunctionDefs': first,
    'maxInstructionDefs': first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

headFlagsMergeBitMap = {
    'size': 16,
    '*': bitwise_or,
    1: bitwise_and, # Baseline at y = 0
    2: bitwise_and, # lsb at x = 0
    3: bitwise_and, # Force ppem to integer values. FIXME?
    5: bitwise_and, # Font is vertical
    6: lambda bit: 0, # Always set to zero
    11: bitwise_and, # Font data is 'lossless'
    13: bitwise_and, # Optimized for ClearType
    14: bitwise_and, # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0, # Always set to zero
}

ttLib.getTableClass('head').mergeMap = {
    'tableTag': equal,
    'tableVersion': max,
    'fontRevision': max,
    'checkSumAdjustment': lambda lst: 0, # We need *something* here
    'magicNumber': equal,
    'flags': mergeBits(headFlagsMergeBitMap),
    'unitsPerEm': equal,
    'created': current_time,
    'modified': current_time,
    'xMin': min,
    'yMin': min,
    'xMax': max,
    'yMax': max,
    'macStyle': first,
    'lowestRecPPEM': max,
    'fontDirectionHint': lambda lst: 2,
    'indexToLocFormat': equal,
    'glyphDataFormat': equal,
}

ttLib.getTableClass('hhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceWidthMax': max,
    'minLeftSideBearing': min,
    'minRightSideBearing': min,
    'xMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfHMetrics': recalculate,
}
os2FsTypeMergeBitMap = {
    'size': 16,
    '*': lambda bit: 0,
    1: bitwise_or, # no embedding permitted
    2: bitwise_and, # allow previewing and printing documents
    3: bitwise_and, # allow editing documents
    8: bitwise_or, # no subsetting permitted
    9: bitwise_or, # no embedding of outlines permitted
}

def mergeOs2FsType(lst):
    lst = list(lst)
    if all(item == 0 for item in lst):
        return 0

    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType
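# Illustrative sketch (not part of the original module): fsType merging combines
# the embedding permissions of the inputs, with bit 1 ("no embedding")
# overriding the preview/edit bits.
#
#     mergeOs2FsType([0x0000, 0x0008])   # installable + editable -> 0x0008
#     mergeOs2FsType([0x0002, 0x0008])   # no-embedding wins      -> 0x0002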
ttLib.getTableClass('OS/2').mergeMap = {
    '*': first,
    'tableTag': equal,
    'version': max,
    'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
    'fsType': mergeOs2FsType, # Will be overwritten
    'panose': first, # FIXME: should really be the first Latin font
    'ulUnicodeRange1': bitwise_or,
    'ulUnicodeRange2': bitwise_or,
    'ulUnicodeRange3': bitwise_or,
    'ulUnicodeRange4': bitwise_or,
    'fsFirstCharIndex': min,
    'fsLastCharIndex': max,
    'sTypoAscender': max,
    'sTypoDescender': min,
    'sTypoLineGap': max,
    'usWinAscent': max,
    'usWinDescent': max,
    # Version 2,3,4
    'ulCodePageRange1': onlyExisting(bitwise_or),
    'ulCodePageRange2': onlyExisting(bitwise_or),
    'usMaxContex': onlyExisting(max),
    # TODO version 5
}

@_add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self

ttLib.getTableClass('post').mergeMap = {
    '*': first,
    'tableTag': equal,
    'formatType': max,
    'isFixedPitch': min,
    'minMemType42': max,
    'maxMemType42': lambda lst: 0,
    'minMemType1': max,
    'maxMemType1': lambda lst: 0,
    'mapping': onlyExisting(sumDicts),
    'extraNames': lambda lst: [],
}
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
    'tableTag': equal,
    'metrics': sumDicts,
}

ttLib.getTableClass('name').mergeMap = {
    'tableTag': equal,
    'names': first, # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass('loca').mergeMap = {
    '*': recalculate,
    'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
    'tableTag': equal,
    'glyphs': sumDicts,
    'glyphOrder': sumLists,
}

@_add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
    for i,table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)

ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable

def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2):
    pen1 = DecomposingRecordingPen(glyphSet1)
    pen2 = DecomposingRecordingPen(glyphSet2)
    g1 = glyphSet1[glyph1]
    g2 = glyphSet2[glyph2]
    g1.draw(pen1)
    g2.draw(pen2)
    return (pen1.value == pen2.value and
            g1.width == g2.width and
            (not hasattr(g1, 'height') or g1.height == g2.height))
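# Illustrative sketch (not part of the original module): _glyphsAreSame lets
# the cmap merge below treat two glyphs from different fonts as duplicates
# when their decomposed outlines and advances match, so no 'locl' substitution
# needs to be synthesized for them.
#
#     gs1, gs2 = font1.getGlyphSet(), font2.getGlyphSet()   # hypothetical fonts
#     if _glyphsAreSame(gs1, gs2, 'A', 'A#1'):
#         pass   # keep the existing mapping; no GSUB fix-up required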
@_add_method(ttLib.getTableClass('CFF '))
def merge(self, m, tables):
    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    storedNamesStrings = []
    glyphOrderStrings = []
    for name in newfont.strings.strings:
        if name not in newfont.getGlyphOrder():
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)
    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    newcsi = newcs.charStringsIndex
    log.info("Font 0 global subrs: %s." % len(newcff.cff.GlobalSubrs))
    ls = None
    try:
        ls = newfont.Private.Subrs
    except:
        pass
    if ls is not None:
        log.info("Font 0 local subrs: %s." % str(len(ls)))
    else:
        log.info("Font 0 has no local subrs.")
    log.info("Font 0 CharStrings: %s, %s." % (str(len(newcs)), str(len(newcsi))))
    baseIndex = len(newcsi)
    for i, table in enumerate(tables[1:]):
        font = table.cff[0]
        font.Private = private
        for name in font.strings.strings:
            if name in font.getGlyphOrder():
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        gs = table.cff.GlobalSubrs
        log.info("Font %s global subrs: %s." % (str(i+1), str(len(gs))))
        ls = None
        try:
            ls = font.Private.Subrs
        except:
            pass
        if ls is not None:
            log.info("Font %s local subrs: %s." % (str(i+1), str(len(ls))))
        else:
            log.info("Font %s has no local subrs." % str(i+1))
        log.info("Font %s CharStrings: %s, %s." % (str(i+1), str(len(cs)), str(len(cs.charStringsIndex))))
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            log.info("HAS FDSelect %s." % str(sel))
        else:
            log.info("HAS NO FDSelect.")
        numCharsExcludingNotDef = len(cs.charStringsIndex) - 1
        newcsi.items.extend([None] * numCharsExcludingNotDef)
        j = baseIndex
        for name,i in cs.charStrings.items():
            if name in ['.notdef']:
                continue
            chrset.append(name)
            newcs.charStrings[name] = j
            ch = cs.charStringsIndex[i]
            newcsi[j] = ch
            j += 1
        baseIndex = j

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff
@_add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):
    # TODO Handle format=14.
    # Only merges 4/3/1 and 12/3/10 subtables, ignores all other subtables
    # If there is a format 12 table for the same font, ignore the format 4 table
    cmapTables = []
    for fontIdx,table in enumerate(tables):
        format4 = None
        format12 = None
        for subtable in table.tables:
            properties = (subtable.format, subtable.platformID, subtable.platEncID)
            if properties == (4,3,1):
                format4 = subtable
            elif properties == (12,3,10):
                format12 = subtable
        if format12 is not None:
            cmapTables.append((format12, fontIdx))
        elif format4 is not None:
            cmapTables.append((format4, fontIdx))

    # Build a unicode mapping, then decide which format is needed to store it.
    cmap = {}
    fontIndexForGlyph = {}
    glyphSets = [None for f in m.fonts] if hasattr(m, 'fonts') else None
    for table,fontIdx in cmapTables:
        # handle duplicates
        for uni,gid in table.cmap.items():
            oldgid = cmap.get(uni, None)
            if oldgid is None:
                cmap[uni] = gid
                fontIndexForGlyph[gid] = fontIdx
            elif oldgid != gid:
                # Char previously mapped to oldgid, now to gid.
                # Record, to fix up in GSUB 'locl' later.
                if m.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
                    if glyphSets is not None:
                        oldFontIdx = fontIndexForGlyph[oldgid]
                        for idx in (fontIdx, oldFontIdx):
                            if glyphSets[idx] is None:
                                glyphSets[idx] = m.fonts[idx].getGlyphSet()
                        if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
                            continue
                    m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
                elif m.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
                    # Char previously mapped to oldgid but oldgid is already remapped to a different
                    # gid, because of another Unicode character.
                    # TODO: Try harder to do something about these.
                    log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)

    cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule('cmap')
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
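# Illustrative sketch (not part of the original module): when two fonts map the
# same codepoint to different glyphs, the cmap merge above records the clash in
# m.duplicateGlyphsPerFont; the GSUB merge below turns each per-font entry into
# a synthesized 'locl' SingleSubst lookup.
#
#     # duplicateGlyphsPerFont[fontIdx] maps kept-glyph -> that font's glyph,
#     # e.g. (glyph names are hypothetical): [{}, {'A#0': 'A#1'}, {}]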
def mergeLookupLists(lst):
    # TODO Do smarter merge.
    return sumLists(lst)

def mergeFeatures(lst):
    assert lst
    self = otTables.Feature()
    self.FeatureParams = None
    self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
    self.LookupCount = len(self.LookupListIndex)
    return self

def mergeFeatureLists(lst):
    d = {}
    for l in lst:
        for f in l:
            tag = f.FeatureTag
            if tag not in d:
                d[tag] = []
            d[tag].append(f.Feature)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.FeatureRecord()
        rec.FeatureTag = tag
        rec.Feature = mergeFeatures(d[tag])
        ret.append(rec)
    return ret

def mergeLangSyses(lst):
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)

    self = otTables.LangSys()
    self.LookupOrder = None
    self.ReqFeatureIndex = 0xFFFF
    self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
    self.FeatureCount = len(self.FeatureIndex)
    return self

def mergeScripts(lst):
    assert lst

    if len(lst) == 1:
        return lst[0]
    langSyses = {}
    for sr in lst:
        for lsr in sr.LangSysRecord:
            if lsr.LangSysTag not in langSyses:
                langSyses[lsr.LangSysTag] = []
            langSyses[lsr.LangSysTag].append(lsr.LangSys)
    lsrecords = []
    for tag, langSys_list in sorted(langSyses.items()):
        lsr = otTables.LangSysRecord()
        lsr.LangSys = mergeLangSyses(langSys_list)
        lsr.LangSysTag = tag
        lsrecords.append(lsr)

    self = otTables.Script()
    self.LangSysRecord = lsrecords
    self.LangSysCount = len(lsrecords)
    self.DefaultLangSys = mergeLangSyses([s.DefaultLangSys for s in lst if s.DefaultLangSys])
    return self

def mergeScriptRecords(lst):
    d = {}
    for l in lst:
        for s in l:
            tag = s.ScriptTag
            if tag not in d:
                d[tag] = []
            d[tag].append(s.Script)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.ScriptRecord()
        rec.ScriptTag = tag
        rec.Script = mergeScripts(d[tag])
        ret.append(rec)
    return ret
otTables.ScriptList.mergeMap = {
    'ScriptCount': lambda lst: None, # TODO
    'ScriptRecord': mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    'BaseScriptCount': lambda lst: None, # TODO
    # TODO: Merge duplicate entries
    'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
}

otTables.FeatureList.mergeMap = {
    'FeatureCount': sum,
    'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    'LookupCount': sum,
    'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
    'Format': min,
    'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
    'Format': min,
    'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
    'Coverage': mergeObjects,
    'LigGlyphCount': sum,
    'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
    'Coverage': mergeObjects,
    'GlyphCount': sum,
    'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    'MarkSetTableFormat': equal,
    'MarkSetCount': sum,
    'Coverage': sumLists,
}

otTables.Axis.mergeMap = {
    '*': mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    'BaseTagCount': sum,
    'BaselineTag': sumLists,
}

otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
    '*': mergeObjects,
    'Version': max,
}

ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
    'tableTag': onlyExisting(equal), # XXX clean me up
    'table': mergeObjects,
}
@_add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):

    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups: continue
        assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups)
        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == 'DFLT': continue # XXX
            for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
                if langsys is None: continue # XXX Create!
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = 'locl'
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        langsys.FeatureIndex.append(synthFeature)
                        langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature

                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                feature.Feature.LookupListIndex[:0] = [synthLookup]
                feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
    pass

# Copied and trimmed down from subset.py
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def __merge_classify_context(self):

    class ContextHelper(object):
        def __init__(self, klass, Format):
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleSet = ChainTyp+'RuleSet'
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleSet = ChainTyp+'ClassSet'

    if self.Format not in [1, 2, 3]:
        return None # Don't shoot the messenger; let it go
    if not hasattr(self.__class__, "__ContextHelpers"):
        self.__class__.__ContextHelpers = {}
    if self.Format not in self.__class__.__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__.__ContextHelpers[self.Format] = helper
    return self.__class__.__ContextHelpers[self.Format]

@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def mapLookups(self, lookupMap):
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    for st in self.SubTable:
        if not st: continue
        st.mapLookups(lookupMap)

@_add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    for l in self.Lookup:
        if not l: continue
        l.mapLookups(lookupMap)

@_add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]

@_add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature: continue
        f.Feature.mapLookups(lookupMap)

@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]

@_add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys: continue
        l.LangSys.mapFeatures(featureMap)

@_add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script: continue
        s.Script.mapFeatures(featureMap)
class Options(object):

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):

        self.verbose = False
        self.timing = False

        self.set(**kwargs)

    def set(self, **kwargs):
        for k,v in kwargs.items():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)
    def parse_opts(self, argv, ignore_unknown=[]):
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith('--'):
                ret.append(a)
                continue
            a = a[2:]
            i = a.find('=')
            op = '='
            if i == -1:
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1]+'='  # Op is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i+1:]
            k = k.replace('-', '_')
            if not hasattr(self, k):
                if ignore_unknown is True or k in ignore_unknown:
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            if isinstance(ov, bool):
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(',')
                if vv == ['']:
                    vv = []
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == '=':
                    v = vv
                elif op == '+=':
                    v = ov
                    v.extend(vv)
                elif op == '-=':
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)

        return ret
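# Illustrative sketch (not part of the original module): parse_opts understands
# GNU-style long options and maps them onto Options attributes; non-option
# arguments are passed through.
#
#     opts = Options()
#     rest = opts.parse_opts(['--verbose', '--no-timing', 'a.ttf', 'b.ttf'])
#     # opts.verbose == True, opts.timing == False, rest == ['a.ttf', 'b.ttf']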
class _AttendanceRecordingIdentityDict(object):
    """A dictionary-like object that records indices of items actually accessed
    from a list."""

    def __init__(self, lst):
        self.l = lst
        self.d = {id(v):i for i,v in enumerate(lst)}
        self.s = set()

    def __getitem__(self, v):
        self.s.add(self.d[id(v)])
        return v

class _GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list."""

    def __init__(self, lst):
        self.l = lst
        self.s = set(id(v) for v in lst)

    def __getitem__(self, v):
        if id(v) not in self.s:
            self.s.add(id(v))
            self.l.append(v)
        return v

class _NonhashableDict(object):
    """A dictionary-like object mapping objects to values."""

    def __init__(self, keys, values=None):
        if values is None:
            self.d = {id(v):i for i,v in enumerate(keys)}
        else:
            self.d = {id(k):v for k,v in zip(keys, values)}

    def __getitem__(self, k):
        return self.d[id(k)]

    def __setitem__(self, k, v):
        self.d[id(k)] = v

    def __delitem__(self, k):
        del self.d[id(k)]
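# Illustrative sketch (not part of the original module): _postMerge below uses
# these helpers to round-trip between object references and list indices --
# _GregariousIdentityDict registers new records, _AttendanceRecordingIdentityDict
# notes which ones are actually referenced, and _NonhashableDict maps the
# surviving records back to their final indices.
#
#     records = [otTables.FeatureRecord(), otTables.FeatureRecord()]
#     backToIndex = _NonhashableDict(records)
#     backToIndex[records[1]]   # -> 1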
class Merger(object):

    def __init__(self, options=None):

        if not options:
            options = Options()

        self.options = options
    def merge(self, fontfiles):
        # Take first input file sfntVersion
        sfntVersion = ttLib.TTFont(fontfiles[0]).sfntVersion
        mega = ttLib.TTFont(sfntVersion=sfntVersion)

        #
        # Settle on a mega glyph order.
        #
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]

        # Check if all fonts have CFF
        allOTFs = all(cff is not None for cff in [font.get('CFF ') for font in fonts])

        cffTables = []
        if allOTFs:
            for font in fonts:
                cffTable = font.get('CFF ')
                # Desubroutinize
                cs = cffTable.cff[0].CharStrings
                for g in cffTable.cff[0].charset:
                    c, _ = cs.getItemAndSelector(g)
                    c.decompile()
                    subrs = getattr(c.private, "Subrs", [])
                    decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs)
                    decompiler.execute(c)
                    c.program = c._desubroutinized

                cffTable.cff[0].Private.Subrs = cffLib.SubrsIndex()
                cffTable.cff.GlobalSubrs = cffLib.GlobalSubrsIndex()

                cffTables.append(cffTable)

        glyphOrders = [font.getGlyphOrder() for font in fonts]

        # Handle glyphOrder merging and cff glyphs renaming together
        megaGlyphOrder, newcffTables = self._mergeGlyphOrders(glyphOrders, cffTables)

        # Reload fonts and set new glyph names on them.
        # TODO Is it necessary to reload font? I think it is. At least
        # it's safer, in case tables were loaded to provide glyph names.
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]

        if len(newcffTables) > 0:
            for font, glyphOrder, cffTable in zip(fonts, glyphOrders, newcffTables):
                font.setGlyphOrder(glyphOrder)
                font['CFF '] = cffTable
        else:
            for font, glyphOrder in zip(fonts, glyphOrders):
                font.setGlyphOrder(glyphOrder)

        mega.setGlyphOrder(megaGlyphOrder)

        for font in fonts:
            self._preMerge(font)

        self.fonts = fonts
        self.duplicateGlyphsPerFont = [{} for f in fonts]
        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove('GlyphOrder')

        # Make sure we process CFF before cmap before GSUB as we have a dependency there.
        if 'GSUB' in allTags:
            allTags.remove('GSUB')
            allTags = ['GSUB'] + list(allTags)
        if 'cmap' in allTags:
            allTags.remove('cmap')
            allTags = ['cmap'] + list(allTags)
        if 'CFF ' in allTags:
            allTags.remove('CFF ')
            allTags = ['CFF '] + list(allTags)

        for tag in allTags:
            with timer("merge '%s'" % tag):
                tables = [font.get(tag, NotImplemented) for font in fonts]

                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)

                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)

        del self.duplicateGlyphsPerFont
        del self.fonts

        self._postMerge(mega)

        # Compress CFF table
        # if mega.get('CFF '):
        #     tempFile = tempfile.NamedTemporaryFile(suffix='.otf', delete=False)
        #     mega.save(tempFile.name)
        #     f = ttLib.TTFont(tempFile.name)
        #     compreffor.compress(f)
        #     mega = f
        #     os.remove(tempFile.name)
        #     log.info("FINAL final global subrs: %s." % len(mega.get('CFF ').cff.GlobalSubrs))
        #     ls = None
        #     try:
        #         ls = mega.get('CFF ').cff[0].Private.Subrs
        #     except:
        #         pass
        #     if ls is not None:
        #         log.info("FINAL font local subrs: %s." % len(ls))
        #     else:
        #         log.info("FINAL font has no local subrs.")

        return mega
    def _mergeGlyphOrders(self, glyphOrders, cffTables):
        """Modifies passed-in glyphOrders to reflect new glyph names.
        Returns glyphOrder for the merged font."""
        # Simply append font index to the glyph name for now.
        # TODO Even this simplistic numbering can result in conflicts.
        # But then again, we have to improve this soon anyway.

        newcffTables = []
        # If we deal with CFF tables,
        # rename topDictIndex glyph names
        for n,cffTable in enumerate(cffTables):
            td = cffTable.cff.topDictIndex[0]
            d = {}
            for i, (glyphName, v) in enumerate(td.CharStrings.charStrings.items()):
                if glyphName not in ['.notdef']:
                    glyphName += "#" + repr(n)
                elif n > 0:
                    continue
                d[glyphName] = v
            cffTable.cff.topDictIndex[0].CharStrings.charStrings = d
            newcffTables.append(cffTable)

        mega = []

        # If we deal with CFF tables,
        # make sure to keep .notdef as first glyph,
        # we don't rename it, and we skip any additional .notdef
        if len(cffTables) > 0:
            for n,glyphOrder in enumerate(glyphOrders):
                for i,glyphName in enumerate(glyphOrder):
                    if glyphName not in ['.notdef']:
                        glyphName += "#" + repr(n)
                    elif n > 0:
                        continue
                    glyphOrder[i] = glyphName
                    mega.append(glyphName)

        # TTF fonts
        else:
            for n,glyphOrder in enumerate(glyphOrders):
                for i,glyphName in enumerate(glyphOrder):
                    glyphName += "#" + repr(n)
                    glyphOrder[i] = glyphName
                    mega.append(glyphName)

        return mega, newcffTables
    def mergeObjects(self, returnTable, logic, tables):
        # Right now we don't use self at all. Will use in the future
        # for options and logging.

        allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
        for key in allKeys:
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic['*']
                except KeyError:
                    raise Exception("Don't know how to merge key %s of class %s" %
                            (key, returnTable.__class__.__name__))
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)

        return returnTable
    def _preMerge(self, font):

        # Map indices to references

        GDEF = font.get('GDEF')
        GSUB = font.get('GSUB')
        GPOS = font.get('GPOS')

        for t in [GSUB, GPOS]:
            if not t: continue

            if t.table.LookupList:
                lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
                t.table.LookupList.mapLookups(lookupMap)
                t.table.FeatureList.mapLookups(lookupMap)

            if t.table.FeatureList and t.table.ScriptList:
                featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
                t.table.ScriptList.mapFeatures(featureMap)

        # TODO GDEF/Lookup MarkFilteringSets
        # TODO FeatureParams nameIDs
    def _postMerge(self, font):

        # Map references back to indices

        GDEF = font.get('GDEF')
        GSUB = font.get('GSUB')
        GPOS = font.get('GPOS')

        for t in [GSUB, GPOS]:
            if not t: continue

            if t.table.FeatureList and t.table.ScriptList:

                # Collect unregistered (new) features.
                featureMap = _GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
                t.table.ScriptList.mapFeatures(featureMap)

                # Record used features.
                featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
                t.table.ScriptList.mapFeatures(featureMap)
                usedIndices = featureMap.s

                # Remove unused features
                t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]

                # Map back to indices.
                featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord)
                t.table.ScriptList.mapFeatures(featureMap)

                t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

            if t.table.LookupList:

                # Collect unregistered (new) lookups.
                lookupMap = _GregariousIdentityDict(t.table.LookupList.Lookup)
                t.table.FeatureList.mapLookups(lookupMap)
                t.table.LookupList.mapLookups(lookupMap)

                # Record used lookups.
                lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
                t.table.FeatureList.mapLookups(lookupMap)
                t.table.LookupList.mapLookups(lookupMap)
                usedIndices = lookupMap.s

                # Remove unused lookups
                t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]

                # Map back to indices.
                lookupMap = _NonhashableDict(t.table.LookupList.Lookup)
                t.table.FeatureList.mapLookups(lookupMap)
                t.table.LookupList.mapLookups(lookupMap)

                t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

        # TODO GDEF/Lookup MarkFilteringSets
        # TODO FeatureParams nameIDs
__all__ = [
    'Options',
    'Merger',
    'main'
]

@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args)

    if len(args) < 1:
        print("usage: pyftmerge font...", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(args)
    outfile = 'merged.ttf'
    with timer("compile and save font"):
        font.save(outfile)

if __name__ == "__main__":
    sys.exit(main())