from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.pens.boundsPen import BoundsPen
from fontTools.misc.fixedTools import otRound
from fontTools.varLib.varStore import VarStoreInstancer


def _add_method(*clazzes):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    def wrapper(method):
        done = []
        for clazz in clazzes:
            if clazz in done:
                continue  # Support multiple names of a clazz
            done.append(clazz)
            assert clazz.__name__ != 'DefaultTable', \
                'Oops, table class not found.'
            assert not hasattr(clazz, method.__name__), \
                "Oops, class '%s' has method '%s'." % (clazz.__name__,
                                                       method.__name__)
            setattr(clazz, method.__name__, method)
        return None
    return wrapper


def _uniq_sort(l):
    return sorted(set(l))
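

# Illustrative sketch (not part of the original module): _add_method is how every
# subsetting pass below becomes a bound method on an existing fontTools class.
# The class and method names here are hypothetical; calling this function runs the
# demonstration, importing the module does not.
def _example_add_method_sketch():
    class _FakeTable(object):
        pass

    @_add_method(_FakeTable)
    def greet(self):  # becomes _FakeTable.greet(); the decorator itself returns None
        return "hello"

    assert _FakeTable().greet() == "hello"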


class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):

    def __init__(self, components, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self,
                                                  localSubrs,
                                                  globalSubrs)
        self.components = components

    def op_endchar(self, index):
        args = self.popall()
        if len(args) >= 4:
            from fontTools.encodings.StandardEncoding import StandardEncoding
            # endchar can do seac accent building; the T2 spec says it's deprecated,
            # but recent software that shall remain nameless does output it.
            adx, ady, bchar, achar = args[-4:]
            baseGlyph = StandardEncoding[bchar]
            accentGlyph = StandardEncoding[achar]
            self.components.add(baseGlyph)
            self.components.add(accentGlyph)


@_add_method(ttLib.getTableClass('CFF '))
def closure_glyphs(self, s):
    cff = self.cff
    assert len(cff) == 1
    font = cff[cff.keys()[0]]
    glyphSet = font.CharStrings

    decompose = s.glyphs
    while decompose:
        components = set()
        for g in decompose:
            if g not in glyphSet:
                continue
            gl = glyphSet[g]

            subrs = getattr(gl.private, "Subrs", [])
            decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
            decompiler.execute(gl)
        components -= s.glyphs
        s.glyphs.update(components)
        decompose = components
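

# Illustrative sketch (not part of the original module): the fixed-point loop in
# closure_glyphs, reduced to plain sets.  The component map below is hypothetical;
# in the real code the components come from _ClosureGlyphsT2Decompiler finding
# seac-style endchar operators.
def _example_closure_sketch():
    seac_components = {'Aacute': {'A', 'acute'}, 'A': set(), 'acute': set()}
    glyphs = {'Aacute'}            # what the subsetter asked for
    decompose = set(glyphs)
    while decompose:
        components = set()
        for g in decompose:
            components.update(seac_components.get(g, ()))
        components -= glyphs       # only glyphs we have not seen yet
        glyphs.update(components)
        decompose = components     # iterate until nothing new shows up
    assert glyphs == {'Aacute', 'A', 'acute'}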


@_add_method(ttLib.getTableClass('CFF '))
def prune_pre_subset(self, font, options):
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]

    if options.notdef_glyph and not options.notdef_outline:
        for fontname in cff.keys():
            font = cff[fontname]
            c, fdSelectIndex = font.CharStrings.getItemAndSelector('.notdef')
            if hasattr(font, 'FDArray') and font.FDArray is not None:
                private = font.FDArray[fdSelectIndex].Private
            else:
                private = font.Private
            dfltWdX = private.defaultWidthX
            nmnlWdX = private.nominalWidthX
            pen = NullPen()
            c.draw(pen)  # this will set the charstring's width
            if c.width != dfltWdX:
                c.program = [c.width - nmnlWdX, 'endchar']
            else:
                c.program = ['endchar']

    # Clear useless Encoding
    for fontname in cff.keys():
        font = cff[fontname]
        # https://github.com/behdad/fonttools/issues/620
        font.Encoding = "StandardEncoding"

    return True  # bool(cff.fontNames)


@_add_method(ttLib.getTableClass('CFF '))
def subset_glyphs(self, s):
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Load all glyphs
        for g in font.charset:
            if g not in s.glyphs:
                continue
            c, _ = cs.getItemAndSelector(g)

        if cs.charStringsAreIndexed:
            indices = [i for i, g in enumerate(font.charset) if g in s.glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                sel = font.FDSelect
                # XXX We want to set sel.format to None, such that the
                # most compact format is selected. However, OTS was
                # broken and couldn't parse a FDSelect format 0 that
                # happened before CharStrings. As such, always force
                # format 3 until we fix cffLib to always generate
                # FDSelect after CharStrings.
                # https://github.com/khaledhosny/ots/pull/31
                #sel.format = None
                sel.format = 3
                sel.gidArray = [sel.gidArray[i] for i in indices]
            cs.charStrings = {g: indices.index(v)
                              for g, v in cs.charStrings.items()
                              if g in s.glyphs}
        else:
            cs.charStrings = {g: v
                              for g, v in cs.charStrings.items()
                              if g in s.glyphs}
        font.charset = [g for g in font.charset if g in s.glyphs]
        font.numGlyphs = len(font.charset)

    return True  # any(cff[fontname].numGlyphs for fontname in cff.keys())


@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
    p = self.program
    assert len(p)
    for i in range(1, len(p)):
        if p[i] == 'callsubr':
            assert isinstance(p[i-1], int)
            p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias
        elif p[i] == 'callgsubr':
            assert isinstance(p[i-1], int)
            p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
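

# Illustrative sketch (not part of the original module): the operand arithmetic in
# subset_subroutines.  A charstring stores callsubr operands with a bias subtracted
# (107 when there are fewer than 1240 subroutines, per the CFF spec), so renumbering
# maps old-bias operand -> old index -> position in the kept list -> new-bias operand.
# The numbers are made up.
def _example_subr_renumber_sketch():
    class _FakeIndex(object):
        pass

    subrs = _FakeIndex()
    subrs._used = [0, 3, 7]        # old subroutine indices that survived subsetting
    subrs._old_bias = 107          # bias for the original subroutine count
    subrs._new_bias = 107          # bias for the three subroutines that remain
    old_operand = -104             # encoded old index 3 (== -104 + 107)
    new_operand = subrs._used.index(old_operand + subrs._old_bias) - subrs._new_bias
    assert new_operand == -106     # encoded new index 1 (== -106 + 107)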


@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
    hints = self._hints

    if hints.deletions:
        p = self.program
        for idx in reversed(hints.deletions):
            del p[idx-2:idx]

    if hints.has_hint:
        assert not hints.deletions or hints.last_hint <= hints.deletions[0]
        self.program = self.program[hints.last_hint:]
        if not self.program:
            # TODO CFF2 no need for endchar.
            self.program.append('endchar')
        if hasattr(self, 'width'):
            # Insert width back if needed
            if self.width != self.private.defaultWidthX:
                self.program.insert(0, self.width - self.private.nominalWidthX)

    if hints.has_hintmask:
        i = 0
        p = self.program
        while i < len(p):
            if p[i] in ['hintmask', 'cntrmask']:
                assert i + 1 <= len(p)
                del p[i:i+2]
                continue
            i += 1

    assert len(self.program)

    del self._hints
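

# Illustrative sketch (not part of the original module): the hintmask-stripping
# loop from drop_hints, run on a plain list.  Each 'hintmask'/'cntrmask' operator
# is followed by its mask argument, so both entries are deleted together.  The
# program below is made up and not a meaningful charstring.
def _example_hintmask_strip_sketch():
    p = ['hintmask', b'\xf0', 10, 20, 'rlineto', 'cntrmask', b'\x0f', 'endchar']
    i = 0
    while i < len(p):
        if p[i] in ['hintmask', 'cntrmask']:
            del p[i:i+2]           # drop the operator and its mask argument
            continue               # re-check the element that slid into slot i
        i += 1
    assert p == [10, 20, 'rlineto', 'endchar']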


class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):

    def __init__(self, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self,
                                                  localSubrs,
                                                  globalSubrs)
        for subrs in [localSubrs, globalSubrs]:
            if subrs and not hasattr(subrs, "_used"):
                subrs._used = set()

    def op_callsubr(self, index):
        self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)


class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):

    class Hints(object):
        def __init__(self):
            # Whether calling this charstring produces any hint stems.
            # Note that if a charstring starts with hintmask, it will
            # have has_hint set to True, because it *might* produce an
            # implicit vstem if called under certain conditions.
            self.has_hint = False
            # Index to start at to drop all hints
            self.last_hint = 0
            # Index up to which we know more hints are possible.
            # Only relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints
            #    continuing after this, or there might be
            #    other things.  Not clear yet.
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive
            self.has_hintmask = False
            # List of indices of calls to empty subroutines to remove.
            self.deletions = []
        pass

    def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX):
        self._css = css
        psCharStrings.T2WidthExtractor.__init__(
            self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)

    def execute(self, charString):
        old_hints = charString._hints if hasattr(charString, '_hints') else None
        charString._hints = self.Hints()

        psCharStrings.T2WidthExtractor.execute(self, charString)

        hints = charString._hints

        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)

        if hints.status != 2:
            # Check from last_checked, make sure we didn't have any operators.
            for i in range(hints.last_checked, len(charString.program) - 1):
                if isinstance(charString.program[i], str):
                    hints.status = 2
                    break
            else:
                hints.status = 1  # There's *something* here
            hints.last_checked = len(charString.program)

        if old_hints:
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.T2WidthExtractor.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def op_hstem(self, index):
        psCharStrings.T2WidthExtractor.op_hstem(self, index)
        self.processHint(index)

    def op_vstem(self, index):
        psCharStrings.T2WidthExtractor.op_vstem(self, index)
        self.processHint(index)

    def op_hstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
        self.processHint(index)

    def op_vstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
        self.processHint(index)

    def op_hintmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
        self.processHintmask(index)
        return rv

    def op_cntrmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
        self.processHintmask(index)
        return rv

    def processHintmask(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2:
            # Check from last_checked, see if we may be an implicit vstem
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            else:
                # We are an implicit vstem
                hints.has_hint = True
                hints.last_hint = index + 1
                hints.status = 0
        hints.last_checked = index + 1

    def processHint(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints

        # Check from last_checked, make sure we didn't have
        # any operators.
        if hints.status != 2:
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            hints.last_checked = index

        if hints.status != 2:
            if subr_hints.has_hint:
                hints.has_hint = True

                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2  # Leave the subr call in

            elif subr_hints.status == 0:
                hints.deletions.append(index)

        hints.status = max(hints.status, subr_hints.status)


class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):

    def __init__(self, localSubrs, globalSubrs, private=None):
        psCharStrings.SimpleT2Decompiler.__init__(self,
                                                  localSubrs,
                                                  globalSubrs, private)

    def execute(self, charString):
        if (hasattr(charString, '_desubroutinized') and
                charString._desubroutinized):
            return

        charString._patches = []
        psCharStrings.SimpleT2Decompiler.execute(self, charString)
        desubroutinized = charString.program[:]
        for idx, expansion in reversed(charString._patches):
            assert idx >= 2
            assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
            assert type(desubroutinized[idx - 2]) == int
            if expansion[-1] == 'return':
                expansion = expansion[:-1]
            desubroutinized[idx-2:idx] = expansion
        if not charString.isCFF2:
            if 'endchar' in desubroutinized:
                # Cut off after first endchar
                desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
        else:
            if not len(desubroutinized) or desubroutinized[-1] != 'return':
                desubroutinized.append('return')

        charString._desubroutinized = desubroutinized
        del charString._patches

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        cs._patches.append((index, subr._desubroutinized))
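

# Illustrative sketch (not part of the original module): how the patches collected
# by _DesubroutinizingT2Decompiler are spliced in.  A patch records the program
# index just past a callsubr/callgsubr; the operand and the call are replaced by
# the (already desubroutinized) body of the subroutine, minus its trailing
# 'return'.  The program and subroutine below are made up.
def _example_desubroutinize_sketch():
    program = [-107, 'callsubr', 'endchar']          # operand, call, then endchar
    subr_body = [10, 20, 'rlineto', 'return']        # what that subroutine expands to
    patches = [(2, subr_body)]                       # idx points just past 'callsubr'
    desubroutinized = program[:]
    for idx, expansion in reversed(patches):
        if expansion[-1] == 'return':
            expansion = expansion[:-1]               # inlined code needs no 'return'
        desubroutinized[idx-2:idx] = expansion       # replace operand + call
    assert desubroutinized == [10, 20, 'rlineto', 'endchar']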


@_add_method(ttLib.getTableClass('CFF '))
def prune_post_subset(self, font, options):
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Drop unused FontDictionaries
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            indices = _uniq_sort(sel.gidArray)
            sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in indices]
            del arr.file, arr.offsets

        # Desubroutinize if asked for
        if options.desubroutinize:
            for g in font.charset:
                c, _ = cs.getItemAndSelector(g)
                c.decompile()
                subrs = getattr(c.private, "Subrs", [])
                decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs)
                decompiler.execute(c)
                c.program = c._desubroutinized

        # Drop hints if not needed
        if not options.hinting:

            # This can be tricky, but it doesn't have to be.  What we do is:
            #
            # - Run all used glyph charstrings and recurse into subroutines,
            # - For each charstring (including subroutines), if it has any
            #   of the hint stem operators, we mark it as such.
            #   Upon returning, for each charstring we note all the
            #   subroutine calls it makes that (recursively) contain a stem,
            # - Dropping hinting then consists of the following two ops:
            #   * Drop the piece of the program in each charstring before the
            #     last call to a stem op or a stem-calling subroutine,
            #   * Drop all hintmask operations.
            # - It's trickier... A hintmask right after hints and a few numbers
            #   will act as an implicit vstemhm.  As such, we track whether
            #   we have seen any non-hint operators so far and do the right
            #   thing, recursively...  Good luck understanding that :(
            css = set()
            for g in font.charset:
                c, _ = cs.getItemAndSelector(g)
                c.decompile()
                subrs = getattr(c.private, "Subrs", [])
                decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
                                                    c.private.nominalWidthX,
                                                    c.private.defaultWidthX)
                decompiler.execute(c)
                c.width = decompiler.width
            for charstring in css:
                charstring.drop_hints()
            del css

            # Drop font-wide hinting values
            all_privs = []
            if hasattr(font, 'FDSelect'):
                all_privs.extend(fd.Private for fd in font.FDArray)
            else:
                all_privs.append(font.Private)
            for priv in all_privs:
                for k in ['BlueValues', 'OtherBlues',
                          'FamilyBlues', 'FamilyOtherBlues',
                          'BlueScale', 'BlueShift', 'BlueFuzz',
                          'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
                          'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
                    if hasattr(priv, k):
                        setattr(priv, k, None)

        # Renumber subroutines to remove unused ones

        # Mark all used subroutines
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs)
            decompiler.execute(c)

        all_subrs = [font.GlobalSubrs]
        if hasattr(font, 'FDSelect'):
            all_subrs.extend(fd.Private.Subrs for fd in font.FDArray
                             if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
        elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)

        subrs = set(subrs)  # Remove duplicates

        # Prepare
        for subrs in all_subrs:
            if not hasattr(subrs, '_used'):
                subrs._used = set()
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = psCharStrings.calcSubrBias(subrs)
            subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)

        # Renumber glyph charstrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            c.subset_subroutines(subrs, font.GlobalSubrs)

        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'):
                    local_subrs = font.Private.Subrs
                else:
                    local_subrs = []
            else:
                local_subrs = subrs

            subrs.items = [subrs.items[i] for i in subrs._used]
            if hasattr(subrs, 'file'):
                del subrs.file
            if hasattr(subrs, 'offsets'):
                del subrs.offsets

            for subr in subrs.items:
                subr.subset_subroutines(local_subrs, font.GlobalSubrs)

        # Delete local SubrsIndex if empty
        if hasattr(font, 'FDSelect'):
            for fd in font.FDArray:
                _delete_empty_subrs(fd.Private)
        else:
            _delete_empty_subrs(font.Private)

        # Cleanup
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias

    return True
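

# Illustrative sketch (not part of the original module): the methods injected above
# (closure_glyphs, prune_pre_subset, subset_glyphs, prune_post_subset) are driven by
# fontTools.subset.Subsetter.  The font path and glyph names are hypothetical.
def _example_subset_usage_sketch():
    from fontTools.ttLib import TTFont
    from fontTools.subset import Subsetter, Options

    options = Options()
    options.hinting = False            # exercises the hint-dropping path above
    options.desubroutinize = True      # exercises _DesubroutinizingT2Decompiler
    font = TTFont("MyCFFFont.otf")     # hypothetical input font
    subsetter = Subsetter(options=options)
    subsetter.populate(glyphs=[".notdef", "A", "Aacute"])
    subsetter.subset(font)
    font.save("MyCFFFont.subset.otf")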


def _delete_empty_subrs(private_dict):
    if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
        if 'Subrs' in private_dict.rawDict:
            del private_dict.rawDict['Subrs']
        del private_dict.Subrs


@_add_method(ttLib.getTableClass('CFF2'))
def desubroutinize(self, font):
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)
            c.program = c._desubroutinized
        # Delete all the global and local subroutines; nothing references them anymore.
        if font.GlobalSubrs:
            del font.GlobalSubrs
        if hasattr(font, 'FDArray'):
            for fd in font.FDArray:
                pd = fd.Private
                if hasattr(pd, 'Subrs'):
                    del pd.Subrs
                if 'Subrs' in pd.rawDict:
                    del pd.rawDict['Subrs']


def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
    pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
                      "FamilyOtherBlues", "StemSnapH",
                      "StemSnapV")
    pd_blend_values = ("BlueScale", "BlueShift",
                       "BlueFuzz", "StdHW", "StdVW")
    for fontDict in topDict.FDArray:
        pd = fontDict.Private
        vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
        for key, value in pd.rawDict.items():
            if (key in pd_blend_values) and isinstance(value, list):
                delta = interpolateFromDeltas(vsindex, value[1:])
                pd.rawDict[key] = otRound(value[0] + delta)
            elif (key in pd_blend_lists) and isinstance(value[0], list):
                """If any argument in a BlueValues list is a blend list,
                then they all are. The first value of each list is an
                absolute value. The delta tuples are calculated from
                relative master values, hence we need to append all the
                deltas to date to each successive absolute value."""
                delta = 0
                for i, val_list in enumerate(value):
                    delta += otRound(interpolateFromDeltas(vsindex,
                                                           val_list[1:]))
                    value[i] = val_list[0] + delta
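

# Illustrative sketch (not part of the original module): the cumulative-delta rule
# described above, on made-up numbers.  interpolateFromDeltas is replaced by a
# stand-in that simply scales the single region delta by 0.5.
def _example_bluevalues_blend_sketch():
    def interpolate_from_deltas(vsindex, deltas):
        return 0.5 * deltas[0]         # stand-in for the real VariationModel math

    # Two BlueValues entries, each as [default value, region delta].
    value = [[-10, 2], [0, 4]]
    vsindex = 0
    delta = 0
    for i, val_list in enumerate(value):
        delta += otRound(interpolate_from_deltas(vsindex, val_list[1:]))
        value[i] = val_list[0] + delta
    # First entry: -10 + 1; second entry: 0 + (1 + 2), because deltas accumulate.
    assert value == [-9, 3]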


def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
    charstrings = topDict.CharStrings
    for gname in glyphOrder:
        # Interpolate charstring
        charstring = charstrings[gname]
        pd = charstring.private
        vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
        num_regions = pd.getNumRegions(vsindex)
        numMasters = num_regions + 1
        new_program = []
        last_i = 0
        for i, token in enumerate(charstring.program):
            if token == 'blend':
                num_args = charstring.program[i - 1]
                """ The stack is now:
                ..args for following operations
                num_args values from the default font
                num_args tuples, each with numMasters-1 delta values
                num_blend_args
                'blend'
                """
                argi = i - (num_args*numMasters + 1)
                end_args = tuplei = argi + num_args
                while argi < end_args:
                    next_ti = tuplei + num_regions
                    deltas = charstring.program[tuplei:next_ti]
                    delta = interpolateFromDeltas(vsindex, deltas)
                    charstring.program[argi] += otRound(delta)
                    tuplei = next_ti
                    argi += 1
                new_program.extend(charstring.program[last_i:end_args])
                last_i = i + 1
        if last_i != 0:
            new_program.extend(charstring.program[last_i:])
            charstring.program = new_program
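

# Illustrative sketch (not part of the original module): the 'blend' bookkeeping in
# interpolate_cff2_charstrings on a made-up one-region program.  The stack holds two
# default values, one delta per value, the blend-argument count, then 'blend'; the
# stand-in interpolator scales each delta by 0.5.
def _example_blend_sketch():
    def interpolate_from_deltas(vsindex, deltas):
        return 0.5 * deltas[0]         # stand-in for the real VariationModel math

    program = [100, 200, 10, -20, 2, 'blend', 'rmoveto']
    num_regions, numMasters, vsindex = 1, 2, 0
    new_program, last_i = [], 0
    for i, token in enumerate(program):
        if token == 'blend':
            num_args = program[i - 1]
            argi = i - (num_args*numMasters + 1)
            end_args = tuplei = argi + num_args
            while argi < end_args:
                next_ti = tuplei + num_regions
                deltas = program[tuplei:next_ti]
                program[argi] += otRound(interpolate_from_deltas(vsindex, deltas))
                tuplei, argi = next_ti, argi + 1
            new_program.extend(program[last_i:end_args])
            last_i = i + 1
    new_program.extend(program[last_i:])
    assert new_program == [105, 190, 'rmoveto']  # defaults moved by half their deltas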


def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
    """Unlike TrueType glyphs, neither advance width nor bounding box
    info is stored in a CFF2 charstring. The width data exists only in
    the hmtx and HVAR tables. Since LSB data cannot be interpolated
    reliably from the master LSB values in the hmtx table, we traverse
    the charstring to determine the actual bounding box."""

    charstrings = topDict.CharStrings
    boundsPen = BoundsPen(glyphOrder)
    hmtx = varfont['hmtx']
    hvar_table = None
    if 'HVAR' in varfont:
        hvar_table = varfont['HVAR'].table
        fvar = varfont['fvar']
        varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)

    for gid, gname in enumerate(glyphOrder):
        entry = list(hmtx[gname])
        # get width delta.
        if hvar_table:
            if hvar_table.AdvWidthMap:
                width_idx = hvar_table.AdvWidthMap.mapping[gname]
            else:
                width_idx = gid
            width_delta = otRound(varStoreInstancer[width_idx])
        else:
            width_delta = 0

        # get LSB.
        boundsPen.init()
        charstring = charstrings[gname]
        charstring.draw(boundsPen)
        if boundsPen.bounds is None:
            # Happens with non-marking glyphs
            lsb_delta = 0
        else:
            lsb = boundsPen.bounds[0]
            lsb_delta = entry[1] - lsb

        if lsb_delta or width_delta:
            if width_delta:
                entry[0] += width_delta
            if lsb_delta:
                entry[1] = lsb
            hmtx[gname] = tuple(entry)
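

# Illustrative sketch (not part of the original module): how interpolate_cff2_metrics
# rewrites one hmtx entry.  The numbers are made up: the instanced outline's xMin is
# 52 while the master LSB was 50, and HVAR says the advance width grew by 3.
def _example_hmtx_update_sketch():
    entry = [500, 50]                  # (advance width, left side bearing)
    width_delta = 3                    # from the HVAR VarStore at this location
    lsb = 52                           # xMin measured with BoundsPen on the instance
    lsb_delta = entry[1] - lsb
    if lsb_delta or width_delta:
        if width_delta:
            entry[0] += width_delta
        if lsb_delta:
            entry[1] = lsb
    assert tuple(entry) == (503, 52)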