Blacken code

Nikolaus Waxweiler 2022-12-13 11:26:36 +00:00
parent 698d8fb387
commit d584daa8fd
359 changed files with 80867 additions and 67305 deletions
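This commit applies the Black code formatter across the repository; the hunks below are mechanical reformatting (double quotes, long literals exploded one element per line with a trailing comma, explicit line breaks) with no behaviour change. The exact invocation is not recorded in the diff; a minimal sketch of the same rewrite through Black's Python API, on a made-up source string taken from the first hunk, would be:

import black

src = (
    "extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', "
    "'sphinx.ext.napoleon', 'sphinx.ext.coverage', 'sphinx.ext.autosectionlabel']\n"
)
# format_str applies the same rules as the command-line tool: quote
# normalization, and splitting of any literal that exceeds the line length.
print(black.format_str(src, mode=black.Mode()))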


@ -30,14 +30,17 @@ needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage", "sphinx.ext.autosectionlabel"]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.coverage",
"sphinx.ext.autosectionlabel",
]
autodoc_mock_imports = ["gtk", "reportlab"]
autodoc_default_options = {
'members': True,
'inherited-members': True
}
autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@ -52,9 +55,11 @@ source_suffix = ".rst"
master_doc = "index"
# General information about the project.
project = u"fontTools"
copyright = u"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
author = u"Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
project = "fontTools"
copyright = (
"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
)
author = "Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
# HTML page title
html_title = "fontTools Documentation"
@ -64,9 +69,9 @@ html_title = "fontTools Documentation"
# built documents.
#
# The short X.Y version.
version = u"4.0"
version = "4.0"
# The full version, including alpha/beta/rc tags.
release = u"4.0"
release = "4.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -142,8 +147,8 @@ latex_documents = [
(
master_doc,
"fontTools.tex",
u"fontTools Documentation",
u"Just van Rossum, Behdad Esfahbod et al.",
"fontTools Documentation",
"Just van Rossum, Behdad Esfahbod et al.",
"manual",
)
]
@ -153,7 +158,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "fonttools", u"fontTools Documentation", [author], 1)]
man_pages = [(master_doc, "fonttools", "fontTools Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
@ -165,7 +170,7 @@ texinfo_documents = [
(
master_doc,
"fontTools",
u"fontTools Documentation",
"fontTools Documentation",
author,
"fontTools",
"A library for manipulating fonts, written in Python.",


@ -22,13 +22,14 @@ def main(args=None):
sys.argv.append("help")
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
sys.argv[1] = "help"
mod = 'fontTools.'+sys.argv[1]
sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1]
mod = "fontTools." + sys.argv[1]
sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
del sys.argv[0]
import runpy
runpy.run_module(mod, run_name='__main__')
runpy.run_module(mod, run_name="__main__")
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@ -82,10 +82,7 @@ kernRE = re.compile(
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
r"([.A-Za-z0-9_]+)" # char name
r"\s+"
r"(\d+)" # number of parts
r"\s*;\s*"
r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*" # char name # number of parts
)
componentRE = re.compile(
r"PCC\s+" # PPC
@ -125,16 +122,17 @@ class AFM(object):
_attrs = None
_keywords = ['StartFontMetrics',
'EndFontMetrics',
'StartCharMetrics',
'EndCharMetrics',
'StartKernData',
'StartKernPairs',
'EndKernPairs',
'EndKernData',
'StartComposites',
'EndComposites',
_keywords = [
"StartFontMetrics",
"EndFontMetrics",
"StartCharMetrics",
"EndCharMetrics",
"StartKernData",
"StartKernPairs",
"EndKernPairs",
"EndKernData",
"StartComposites",
"EndComposites",
]
def __init__(self, path=None):
@ -235,13 +233,15 @@ class AFM(object):
assert len(components) == ncomponents
self._composites[charname] = components
def write(self, path, sep='\r'):
def write(self, path, sep="\r"):
"""Writes out an AFM font to the given path."""
import time
lines = [ "StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s" % (
time.strftime("%m/%d/%Y %H:%M:%S",
time.localtime(time.time())))]
lines = [
"StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s"
% (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
]
# write comments, assuming (possibly wrongly!) they should
# all appear at the top
@ -267,19 +267,25 @@ class AFM(object):
# write char metrics
lines.append("StartCharMetrics " + repr(len(self._chars)))
items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()]
items = [
(charnum, (charname, width, box))
for charname, (charnum, width, box) in self._chars.items()
]
def myKey(a):
"""Custom key function to make sure unencoded chars (-1)
end up at the end of the list after sorting."""
if a[0] == -1:
a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number
a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number
return a
items.sort(key=myKey)
for charnum, (charname, width, (l, b, r, t)) in items:
lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
(charnum, width, charname, l, b, r, t))
lines.append(
"C %d ; WX %d ; N %s ; B %d %d %d %d ;"
% (charnum, width, charname, l, b, r, t)
)
lines.append("EndCharMetrics")
# write kerning info
@ -394,9 +400,9 @@ class AFM(object):
def __repr__(self):
if hasattr(self, "FullName"):
return '<AFM object for %s>' % self.FullName
return "<AFM object for %s>" % self.FullName
else:
return '<AFM object at %x>' % id(self)
return "<AFM object at %x>" % id(self)
def readlines(path):
@ -404,20 +410,22 @@ def readlines(path):
data = f.read()
return data.splitlines()
def writelines(path, lines, sep='\r'):
def writelines(path, lines, sep="\r"):
with open(path, "w", encoding="ascii", newline=sep) as f:
f.write("\n".join(lines) + "\n")
if __name__ == "__main__":
import EasyDialogs
path = EasyDialogs.AskFileForOpen()
if path:
afm = AFM(path)
char = 'A'
char = "A"
if afm.has_char(char):
print(afm[char]) # print charnum, width and boundingbox
pair = ('A', 'V')
pair = ("A", "V")
if afm.has_kernpair(pair):
print(afm[pair]) # print kerning value for pair
print(afm.Version) # various other afm entries have become attributes
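For context, a small usage sketch of fontTools.afmLib as reformatted above; the .afm path is hypothetical, and the accessors mirror the __main__ block of the module:

from fontTools.afmLib import AFM

afm = AFM("MyFont.afm")           # hypothetical path to an AFM file
print(afm["A"])                   # (charnum, width, (l, b, r, t)) for glyph "A"
if afm.has_kernpair(("A", "V")):
    print(afm[("A", "V")])        # kerning value for the pair
print(afm.Version)                # AFM header entries are exposed as attributes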


@ -5061,10 +5061,12 @@ _aglfnText = """\
class AGLError(Exception):
pass
LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}
def _builddicts():
import re
@ -5073,7 +5075,7 @@ def _builddicts():
parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
for line in lines:
if not line or line[:1] == '#':
if not line or line[:1] == "#":
continue
m = parseAGL_RE.match(line)
if not m:
@ -5089,7 +5091,7 @@ def _builddicts():
parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
for line in lines:
if not line or line[:1] == '#':
if not line or line[:1] == "#":
continue
m = parseAGLFN_RE.match(line)
if not m:
@ -5101,6 +5103,7 @@ def _builddicts():
AGL2UV[glyphName] = unicode
UV2AGL[unicode] = glyphName
_builddicts()
@ -5123,8 +5126,7 @@ def toUnicode(glyph, isZapfDingbats=False):
# 3. Map each component to a character string according to the
# procedure below, and concatenate those strings; the result
# is the character string to which the glyph name is mapped.
result = [_glyphComponentToUnicode(c, isZapfDingbats)
for c in components]
result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
return "".join(result)
@ -5169,7 +5171,7 @@ def _glyphComponentToUnicode(component, isZapfDingbats):
return uni
# Otherwise, map the component to an empty string.
return ''
return ""
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
@ -5177,12 +5179,13 @@ _AGL_ZAPF_DINGBATS = (
" ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
"❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
"①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰")
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
)
def _zapfDingbatsToUnicode(glyph):
"""Helper for toUnicode()."""
if len(glyph) < 2 or glyph[0] != 'a':
if len(glyph) < 2 or glyph[0] != "a":
return None
try:
gid = int(glyph[1:])
@ -5191,7 +5194,7 @@ def _zapfDingbatsToUnicode(glyph):
if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
return None
uchar = _AGL_ZAPF_DINGBATS[gid]
return uchar if uchar != ' ' else None
return uchar if uchar != " " else None
_re_uni = re.compile("^uni([0-9A-F]+)$")
@ -5205,12 +5208,11 @@ def _uniToUnicode(component):
digits = match.group(1)
if len(digits) % 4 != 0:
return None
chars = [int(digits[i : i + 4], 16)
for i in range(0, len(digits), 4)]
chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
# The AGL specification explicitly excluded surrogate pairs.
return None
return ''.join([chr(c) for c in chars])
return "".join([chr(c) for c in chars])
_re_u = re.compile("^u([0-9A-F]{4,6})$")
@ -5226,7 +5228,6 @@ def _uToUnicode(component):
value = int(digits, 16)
except ValueError:
return None
if ((value >= 0x0000 and value <= 0xD7FF) or
(value >= 0xE000 and value <= 0x10FFFF)):
if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
return chr(value)
return None
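For context, the public fontTools.agl helpers touched above in action; the values in the comments follow the Adobe Glyph List:

from fontTools import agl

print(agl.toUnicode("uni0041"))   # "A"
print(agl.toUnicode("f_f_i"))     # "ffi" (underscore-joined components are mapped individually)
print(hex(agl.AGL2UV["Aacute"]))  # 0xc1
print(agl.UV2AGL[0x00C1])         # "Aacute"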

File diff suppressed because it is too large.


@ -33,7 +33,7 @@ def stringToProgram(string):
def programToString(program):
return ' '.join(str(x) for x in program)
return " ".join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
@ -73,7 +73,7 @@ def programToCommands(program, getNumRegions=None):
stack.append(token)
continue
if token == 'blend':
if token == "blend":
assert getNumRegions is not None
numSourceFonts = 1 + getNumRegions(vsIndex)
# replace the blend op args on the stack with a single list
@ -87,16 +87,24 @@ def programToCommands(program, getNumRegions=None):
# if a blend op exists, this is or will be a CFF2 charstring.
continue
elif token == 'vsindex':
elif token == "vsindex":
vsIndex = stack[-1]
assert type(vsIndex) is int
elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
'cntrmask', 'hintmask',
'hmoveto', 'vmoveto', 'rmoveto',
'endchar'}:
elif (not seenWidthOp) and token in {
"hstem",
"hstemhm",
"vstem",
"vstemhm",
"cntrmask",
"hintmask",
"hmoveto",
"vmoveto",
"rmoveto",
"endchar",
}:
seenWidthOp = True
parity = token in {'hmoveto', 'vmoveto'}
parity = token in {"hmoveto", "vmoveto"}
if lenBlendStack:
# lenBlendStack has the number of args represented by the last blend
# arg and all the preceding args. We need to now add the number of
@ -106,18 +114,18 @@ def programToCommands(program, getNumRegions=None):
numArgs = len(stack)
if numArgs and (numArgs % 2) ^ parity:
width = stack.pop(0)
commands.append(('', [width]))
commands.append(("", [width]))
if token in {'hintmask', 'cntrmask'}:
if token in {"hintmask", "cntrmask"}:
if stack:
commands.append(('', stack))
commands.append(("", stack))
commands.append((token, []))
commands.append(('', [next(it)]))
commands.append(("", [next(it)]))
else:
commands.append((token, stack))
stack = []
if stack:
commands.append(('', stack))
commands.append(("", stack))
return commands
@ -126,11 +134,12 @@ def _flattenBlendArgs(args):
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append('blend')
token_list.append("blend")
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
@ -146,75 +155,93 @@ def commandsToProgram(commands):
def _everyN(el, n):
"""Group the list el into groups of size n"""
if len(el) % n != 0: raise ValueError(el)
if len(el) % n != 0:
raise ValueError(el)
for i in range(0, len(el), n):
yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2: raise ValueError(args)
yield ('rmoveto', args)
if len(args) != 2:
raise ValueError(args)
yield ("rmoveto", args)
@staticmethod
def hmoveto(args):
if len(args) != 1: raise ValueError(args)
yield ('rmoveto', [args[0], 0])
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1: raise ValueError(args)
yield ('rmoveto', [0, args[0]])
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [0, args[0]])
@staticmethod
def rlineto(args):
if not args: raise ValueError(args)
if not args:
raise ValueError(args)
for args in _everyN(args, 2):
yield ('rlineto', args)
yield ("rlineto", args)
@staticmethod
def hlineto(args):
if not args: raise ValueError(args)
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ('rlineto', [next(it), 0])
yield ('rlineto', [0, next(it)])
yield ("rlineto", [next(it), 0])
yield ("rlineto", [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args: raise ValueError(args)
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ('rlineto', [0, next(it)])
yield ('rlineto', [next(it), 0])
yield ("rlineto", [0, next(it)])
yield ("rlineto", [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
if not args: raise ValueError(args)
if not args:
raise ValueError(args)
for args in _everyN(args, 6):
yield ('rrcurveto', args)
yield ("rrcurveto", args)
@staticmethod
def hhcurveto(args):
if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
if len(args) < 4 or len(args) % 4 > 1:
raise ValueError(args)
if len(args) % 2 == 1:
yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0])
yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0])
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
if len(args) < 4 or len(args) % 4 > 1:
raise ValueError(args)
if len(args) % 2 == 1:
yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]])
yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]])
yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
@ -223,20 +250,22 @@ class _GeneralizerDecombinerCommandsMap(object):
try:
while True:
args = next(it)
yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
@ -245,32 +274,36 @@ class _GeneralizerDecombinerCommandsMap(object):
try:
while True:
args = next(it)
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args)
if len(args) < 8 or len(args) % 6 != 2:
raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ('rrcurveto', args)
yield ('rlineto', last_args)
yield ("rrcurveto", args)
yield ("rlineto", last_args)
@staticmethod
def rlinecurve(args):
if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args)
if len(args) < 8 or len(args) % 2 != 0:
raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ('rlineto', args)
yield ('rrcurveto', last_args)
yield ("rlineto", args)
yield ("rrcurveto", last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
@ -278,8 +311,11 @@ def _convertBlendOpToArgs(blendList):
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [i for e in blendList for i in
(_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ]
args = [
i
for e in blendList
for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
]
else:
args = blendList
@ -303,10 +339,13 @@ def _convertBlendOpToArgs(blendList):
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ]
deltaList = [
deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
]
blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
return blend_args
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
@ -314,13 +353,19 @@ def generalizeCommands(commands, ignoreErrors=False):
# First, generalize any blend args in the arg list.
if any([isinstance(arg, list) for arg in args]):
try:
args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])]
args = [
n
for arg in args
for n in (
_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
)
]
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(('', args))
result.append(('', [op]))
result.append(("", args))
result.append(("", [op]))
else:
raise
@ -335,14 +380,17 @@ def generalizeCommands(commands, ignoreErrors=False):
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(('', args))
result.append(('', [op]))
result.append(("", args))
result.append(("", [op]))
else:
raise
return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs))
return commandsToProgram(
generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
)
def _categorizeVector(v):
@ -362,27 +410,35 @@ def _categorizeVector(v):
"""
if not v[0]:
if not v[1]:
return '0', v[:1]
return "0", v[:1]
else:
return 'v', v[1:]
return "v", v[1:]
else:
if not v[1]:
return 'h', v[:1]
return "h", v[:1]
else:
return 'r', v
return "r", v
def _mergeCategories(a, b):
if a == '0': return b
if b == '0': return a
if a == b: return a
if a == "0":
return b
if b == "0":
return a
if a == b:
return a
return None
def _negateCategory(a):
if a == 'h': return 'v'
if a == 'v': return 'h'
assert a in '0r'
if a == "h":
return "v"
if a == "v":
return "h"
assert a in "0r"
return a
def _convertToBlendCmds(args):
# return a list of blend commands, and
# the remaining non-blended args, if any.
@ -435,6 +491,7 @@ def _convertToBlendCmds(args):
return new_args
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
@ -449,11 +506,13 @@ def _addArgs(a, b):
return a + b
def specializeCommands(commands,
def specializeCommands(
commands,
ignoreErrors=False,
generalizeFirst=True,
preserveTopology=False,
maxstack=48):
maxstack=48,
):
# We perform several rounds of optimizations. They are carefully ordered and are:
#
@ -487,7 +546,6 @@ def specializeCommands(commands,
#
# 7. For any args which are blend lists, convert them to a blend command.
# 0. Generalize commands.
if generalizeFirst:
commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
@ -496,9 +554,9 @@ def specializeCommands(commands,
# 1. Combine successive rmoveto operations.
for i in range(len(commands) - 1, 0, -1):
if 'rmoveto' == commands[i][0] == commands[i-1][0]:
if "rmoveto" == commands[i][0] == commands[i - 1][0]:
v1, v2 = commands[i - 1][1], commands[i][1]
commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]])
commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
del commands[i]
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
@ -550,15 +608,15 @@ def specializeCommands(commands,
for i in range(len(commands)):
op, args = commands[i]
if op in {'rmoveto', 'rlineto'}:
if op in {"rmoveto", "rlineto"}:
c, args = _categorizeVector(args)
commands[i] = c + op[1:], args
continue
if op == 'rrcurveto':
if op == "rrcurveto":
c1, args1 = _categorizeVector(args[:2])
c2, args2 = _categorizeVector(args[-2:])
commands[i] = c1+c2+'curveto', args1+args[2:4]+args2
commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
continue
# 3. Merge or delete redundant operations, to the extent requested.
@ -581,22 +639,21 @@ def specializeCommands(commands,
# For Type2 CharStrings the sequence is:
# w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
# Some other redundancies change topology (point numbers).
if not preserveTopology:
for i in range(len(commands) - 1, -1, -1):
op, args = commands[i]
# A 00curveto is demoted to a (specialized) lineto.
if op == '00curveto':
if op == "00curveto":
assert len(args) == 4
c, args = _categorizeVector(args[1:3])
op = c+'lineto'
op = c + "lineto"
commands[i] = op, args
# and then...
# A 0lineto can be deleted.
if op == '0lineto':
if op == "0lineto":
del commands[i]
continue
@ -604,8 +661,7 @@ def specializeCommands(commands,
# In CFF2 charstrings from variable fonts, each
# arg item may be a list of blendable values, one from
# each source font.
if (i and op in {'hlineto', 'vlineto'} and
(op == commands[i-1][0])):
if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
_, other_args = commands[i - 1]
assert len(args) == 1 and len(other_args) == 1
try:
@ -622,25 +678,25 @@ def specializeCommands(commands,
op, args = commands[i]
prv, nxt = commands[i - 1][0], commands[i + 1][0]
if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto':
if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
assert len(args) == 1
args = [0, args[0]] if op[0] == 'v' else [args[0], 0]
commands[i] = ('rlineto', args)
args = [0, args[0]] if op[0] == "v" else [args[0], 0]
commands[i] = ("rlineto", args)
continue
if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto':
assert (op[0] == 'r') ^ (op[1] == 'r')
if op[0] == 'v':
if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
assert (op[0] == "r") ^ (op[1] == "r")
if op[0] == "v":
pos = 0
elif op[0] != 'r':
elif op[0] != "r":
pos = 1
elif op[1] == 'v':
elif op[1] == "v":
pos = 4
else:
pos = 5
# Insert, while maintaining the type of args (can be tuple or list).
args = args[:pos] + type(args)((0,)) + args[pos:]
commands[i] = ('rrcurveto', args)
commands[i] = ("rrcurveto", args)
continue
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
@ -650,42 +706,46 @@ def specializeCommands(commands,
new_op = None
# Merge logic...
if {op1, op2} <= {'rlineto', 'rrcurveto'}:
if {op1, op2} <= {"rlineto", "rrcurveto"}:
if op1 == op2:
new_op = op1
else:
if op2 == 'rrcurveto' and len(args2) == 6:
new_op = 'rlinecurve'
if op2 == "rrcurveto" and len(args2) == 6:
new_op = "rlinecurve"
elif len(args2) == 2:
new_op = 'rcurveline'
new_op = "rcurveline"
elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}:
elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
new_op = op2
elif {op1, op2} == {'vlineto', 'hlineto'}:
elif {op1, op2} == {"vlineto", "hlineto"}:
new_op = op1
elif 'curveto' == op1[2:] == op2[2:]:
elif "curveto" == op1[2:] == op2[2:]:
d0, d1 = op1[:2]
d2, d3 = op2[:2]
if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r':
if d1 == "r" or d2 == "r" or d0 == d3 == "r":
continue
d = _mergeCategories(d1, d2)
if d is None: continue
if d0 == 'r':
if d is None:
continue
if d0 == "r":
d = _mergeCategories(d, d3)
if d is None: continue
new_op = 'r'+d+'curveto'
elif d3 == 'r':
if d is None:
continue
new_op = "r" + d + "curveto"
elif d3 == "r":
d0 = _mergeCategories(d0, _negateCategory(d))
if d0 is None: continue
new_op = d0+'r'+'curveto'
if d0 is None:
continue
new_op = d0 + "r" + "curveto"
else:
d0 = _mergeCategories(d0, d3)
if d0 is None: continue
new_op = d0+d+'curveto'
if d0 is None:
continue
new_op = d0 + d + "curveto"
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
@ -697,31 +757,35 @@ def specializeCommands(commands,
for i in range(len(commands)):
op, args = commands[i]
if op in {'0moveto', '0lineto'}:
commands[i] = 'h'+op[1:], args
if op in {"0moveto", "0lineto"}:
commands[i] = "h" + op[1:], args
continue
if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}:
if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
op0, op1 = op[:2]
if (op0 == 'r') ^ (op1 == 'r'):
if (op0 == "r") ^ (op1 == "r"):
assert len(args) % 2 == 1
if op0 == '0': op0 = 'h'
if op1 == '0': op1 = 'h'
if op0 == 'r': op0 = op1
if op1 == 'r': op1 = _negateCategory(op0)
assert {op0,op1} <= {'h','v'}, (op0, op1)
if op0 == "0":
op0 = "h"
if op1 == "0":
op1 = "h"
if op0 == "r":
op0 = op1
if op1 == "r":
op1 = _negateCategory(op0)
assert {op0, op1} <= {"h", "v"}, (op0, op1)
if len(args) % 2:
if op0 != op1: # vhcurveto / hvcurveto
if (op0 == 'h') ^ (len(args) % 8 == 1):
if (op0 == "h") ^ (len(args) % 8 == 1):
# Swap last two args order
args = args[:-2] + args[-1:] + args[-2:-1]
else: # hhcurveto / vvcurveto
if op0 == 'h': # hhcurveto
if op0 == "h": # hhcurveto
# Swap first two args order
args = args[1:2] + args[:1] + args[2:]
commands[i] = op0+op1+'curveto', args
commands[i] = op0 + op1 + "curveto", args
continue
# 7. For any series of args which are blend lists, convert the series to a single blend arg.
@ -732,36 +796,55 @@ def specializeCommands(commands,
return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs))
return commandsToProgram(
specializeCommands(programToCommands(program, getNumRegions), **kwargs)
)
if __name__ == '__main__':
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.specialer", description="CFF CharString generalizer/specializer")
"fonttools cffLib.specialer",
description="CFF CharString generalizer/specializer",
)
parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
parser.add_argument(
"program", metavar="command", nargs="*", help="Commands.")
parser.add_argument(
"--num-regions", metavar="NumRegions", nargs="*", default=None,
help="Number of variable-font regions for blend opertaions.")
"--num-regions",
metavar="NumRegions",
nargs="*",
default=None,
help="Number of variable-font regions for blend opertaions.",
)
options = parser.parse_args(sys.argv[1:])
getNumRegions = None if options.num_regions is None else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
getNumRegions = (
None
if options.num_regions is None
else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
)
program = stringToProgram(options.program)
print("Program:"); print(programToString(program))
print("Program:")
print(programToString(program))
commands = programToCommands(program, getNumRegions)
print("Commands:"); print(commands)
print("Commands:")
print(commands)
program2 = commandsToProgram(commands)
print("Program from commands:"); print(programToString(program2))
print("Program from commands:")
print(programToString(program2))
assert program == program2
print("Generalized program:"); print(programToString(generalizeProgram(program, getNumRegions)))
print("Specialized program:"); print(programToString(specializeProgram(program, getNumRegions)))
print("Generalized program:")
print(programToString(generalizeProgram(program, getNumRegions)))
print("Specialized program:")
print(programToString(specializeProgram(program, getNumRegions)))
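For context, a minimal sketch of the charstring command APIs reformatted above, run on a made-up program (no blend/variable-font data, so getNumRegions is omitted):

from fontTools.cffLib.specializer import (
    commandsToProgram,
    programToCommands,
    specializeProgram,
)

program = [0, 0, "rmoveto", 100, 0, "rlineto", 0, 100, "rlineto"]
commands = programToCommands(program)
# commands == [('rmoveto', [0, 0]), ('rlineto', [100, 0]), ('rlineto', [0, 100])]
assert commandsToProgram(commands) == program
print(specializeProgram(program))
# axis-aligned operators get specialized and merged, e.g. [0, 'hmoveto', 100, 100, 'hlineto']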


@ -16,9 +16,11 @@ from functools import reduce
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
keys = sorted(f.keys())
@ -42,9 +44,10 @@ def cumSum(f, op=add, start=0, decreasing=False):
return out
def byteCost(widths, default, nominal):
if not hasattr(widths, 'items'):
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
@ -52,7 +55,8 @@ def byteCost(widths, default, nominal):
cost = 0
for w, freq in widths.items():
if w == default: continue
if w == default:
continue
diff = abs(w - nominal)
if diff <= 107:
cost += freq
@ -98,7 +102,7 @@ def optimizeWidths(widths):
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, 'items'):
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
@ -115,13 +119,21 @@ def optimizeWidths(widths):
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x-108] + cumFrqU[x-1132]*3)
nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x+108] + cumFrqD[x+1132]*3)
nomnCostU = missingdict(
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
)
nomnCostD = missingdict(
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x-108]*2, cumMaxU[x-1132]*5))
dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x+108]*2, cumMaxD[x+1132]*5))
dfltCostU = missingdict(
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
)
dfltCostD = missingdict(
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
)
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
@ -150,34 +162,48 @@ def optimizeWidths(widths):
return default, nominal
def main(args=None):
"""Calculate optimum defaultWidthX/nominalWidthX values"""
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.width",
description=main.__doc__,
)
parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
help="Input TTF files")
parser.add_argument('-b', '--brute-force', dest="brute", action="store_true",
help="Use brute-force approach (VERY slow)")
parser.add_argument(
"inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
)
parser.add_argument(
"-b",
"--brute-force",
dest="brute",
action="store_true",
help="Use brute-force approach (VERY slow)",
)
args = parser.parse_args(args)
for fontfile in args.inputs:
font = TTFont(fontfile)
hmtx = font['hmtx']
hmtx = font["hmtx"]
widths = [m[0] for m in hmtx.metrics.values()]
if args.brute:
default, nominal = optimizeWidthsBruteforce(widths)
else:
default, nominal = optimizeWidths(widths)
print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)))
print(
"glyphs=%d default=%d nominal=%d byteCost=%d"
% (len(widths), default, nominal, byteCost(widths, default, nominal))
)
if __name__ == '__main__':
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
main()
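For context, a quick sketch of the width optimizer above on a made-up sample of advance widths (a plain list is accepted and counted into a frequency dict, as the hasattr check shows):

from fontTools.cffLib.width import byteCost, optimizeWidths

widths = [600] * 120 + [500] * 30 + [250] * 5   # made-up advance-width sample
default, nominal = optimizeWidths(widths)
print(default, nominal, byteCost(widths, default, nominal))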


@ -1,3 +1,2 @@
class ColorLibError(Exception):
pass


@ -67,9 +67,7 @@ def _split_format(cls, source):
assert isinstance(
fmt, collections.abc.Hashable
), f"{cls} Format is not hashable: {fmt!r}"
assert (
fmt in cls.convertersByName
), f"{cls} invalid Format: {fmt!r}"
assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
return fmt, remainder


@ -6,44 +6,52 @@ import timeit
MAX_ERR = 5
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)]
for point in range(4)
]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return (
[generate_curve() for curve in range(num_curves)],
[MAX_ERR] * num_curves)
return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
def run_benchmark(
benchmark_module, module, function, setup_suffix='', repeat=5, number=1000):
setup_func = 'setup_' + function
benchmark_module, module, function, setup_suffix="", repeat=5, number=1000
):
setup_func = "setup_" + function
if setup_suffix:
print('%s with %s:' % (function, setup_suffix), end='')
setup_func += '_' + setup_suffix
print("%s with %s:" % (function, setup_suffix), end="")
setup_func += "_" + setup_suffix
else:
print('%s:' % function, end='')
print("%s:" % function, end="")
def wrapper(function, setup_func):
function = globals()[function]
setup_func = globals()[setup_func]
def wrapped():
return function(*setup_func())
return wrapped
results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
print('\t%5.1fus' % (min(results) * 1000000. / number))
print("\t%5.1fus" % (min(results) * 1000000.0 / number))
def main():
"""Benchmark the cu2qu algorithm performance."""
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
run_benchmark("cu2qu.benchmark", "cu2qu", "curve_to_quadratic")
run_benchmark("cu2qu.benchmark", "cu2qu", "curves_to_quadratic")
if __name__ == '__main__':
if __name__ == "__main__":
random.seed(1)
main()


@ -37,7 +37,7 @@ def open_ufo(path):
def _font_to_quadratic(input_path, output_path=None, **kwargs):
ufo = open_ufo(input_path)
logger.info('Converting curves for %s', input_path)
logger.info("Converting curves for %s", input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
if output_path:
@ -67,13 +67,13 @@ def _copytree(input_path, output_path):
def main(args=None):
"""Convert a UFO font from cubic to quadratic curves"""
parser = argparse.ArgumentParser(prog="cu2qu")
parser.add_argument(
"--version", action="version", version=fontTools.__version__)
parser.add_argument("--version", action="version", version=fontTools.__version__)
parser.add_argument(
"infiles",
nargs="+",
metavar="INPUT",
help="one or more input UFO source file(s).")
help="one or more input UFO source file(s).",
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-e",
@ -81,19 +81,21 @@ def main(args=None):
type=float,
metavar="ERROR",
default=None,
help="maxiumum approximation error measured in EM (default: 0.001)")
help="maxiumum approximation error measured in EM (default: 0.001)",
)
parser.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
help="do not reverse the contour direction")
help="do not reverse the contour direction",
)
mode_parser = parser.add_mutually_exclusive_group()
mode_parser.add_argument(
"-i",
"--interpolatable",
action="store_true",
help="whether curve conversion should keep interpolation compatibility"
help="whether curve conversion should keep interpolation compatibility",
)
mode_parser.add_argument(
"-j",
@ -103,7 +105,8 @@ def main(args=None):
default=1,
const=_cpu_count(),
metavar="N",
help="Convert using N multiple processes (default: %(default)s)")
help="Convert using N multiple processes (default: %(default)s)",
)
output_parser = parser.add_mutually_exclusive_group()
output_parser.add_argument(
@ -111,14 +114,18 @@ def main(args=None):
"--output-file",
default=None,
metavar="OUTPUT",
help=("output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."))
help=(
"output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."
),
)
output_parser.add_argument(
"-d",
"--output-dir",
default=None,
metavar="DIRECTORY",
help="output directory where to save converted UFOs")
help="output directory where to save converted UFOs",
)
options = parser.parse_args(args)
@ -143,8 +150,7 @@ def main(args=None):
elif not os.path.isdir(output_dir):
parser.error("'%s' is not a directory" % output_dir)
output_paths = [
os.path.join(output_dir, os.path.basename(p))
for p in options.infiles
os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
]
elif options.output_file:
output_paths = [options.output_file]
@ -152,12 +158,14 @@ def main(args=None):
# save in-place
output_paths = [None] * len(options.infiles)
kwargs = dict(dump_stats=options.verbose > 0,
kwargs = dict(
dump_stats=options.verbose > 0,
max_err_em=options.conversion_error,
reverse_direction=options.reverse_direction)
reverse_direction=options.reverse_direction,
)
if options.interpolatable:
logger.info('Converting curves compatibly')
logger.info("Converting curves compatibly")
ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
@ -171,11 +179,10 @@ def main(args=None):
if output_path:
_copytree(input_path, output_path)
else:
jobs = min(len(options.infiles),
options.jobs) if options.jobs > 1 else 1
jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
if jobs > 1:
func = partial(_font_to_quadratic, **kwargs)
logger.info('Running %d parallel processes', jobs)
logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths))
else:


@ -26,7 +26,7 @@ import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ['curve_to_quadratic', 'curves_to_quadratic']
__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
@ -61,7 +61,9 @@ def dot(v1, v2):
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex)
@cython.locals(
_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
_1 = d
_2 = (c / 3.0) + d
@ -72,7 +74,9 @@ def calc_cubic_points(a, b, c, d):
@cython.cfunc
@cython.inline
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0
@ -83,7 +87,9 @@ def calc_cubic_parameters(p0, p1, p2, p3):
@cython.cfunc
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts.
@ -115,10 +121,20 @@ def split_cubic_into_n_iter(p0, p1, p2, p3, n):
return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int)
@cython.locals(a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex)
@cython.locals(
dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n
@ -135,7 +151,9 @@ def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
yield calc_cubic_points(a1, b1, c1, d1)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts.
@ -152,14 +170,27 @@ def split_cubic_into_two(p0, p1, p2, p3):
tuple: Two cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid = (p0 + 3 * (p1 + p2) + p3) * .125
deriv3 = (p3 + p2 - p1 - p0) * .125
return ((p0, (p0 + p1) * .5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * .5, p3))
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return (
(p0, (p0 + p1) * 0.5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double)
@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
_27=cython.double,
)
@cython.locals(
mid1=cython.complex,
deriv1=cython.complex,
mid2=cython.complex,
deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3, _27=1 / 27):
"""Split a cubic Bezier into three equal parts.
@ -181,13 +212,21 @@ def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
deriv1 = (p3 + 3 * p2 - 4 * p0) * _27
mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * _27
deriv2 = (4 * p3 - 3 * p1 - p0) * _27
return ((p0, (2*p0 + p1) / 3.0, mid1 - deriv1, mid1),
return (
(p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2*p3) / 3.0, p3))
(mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
)
@cython.returns(cython.complex)
@cython.locals(t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
t=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one.
@ -235,7 +274,13 @@ def calc_intersect(a, b, c, d):
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
tolerance=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin.
@ -260,17 +305,24 @@ def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
return True
# Split.
mid = (p0 + 3 * (p1 + p2) + p3) * .125
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance:
return False
deriv3 = (p3 + p2 - p1 - p0) * .125
return (cubic_farthest_fit_inside(p0, (p0+p1)*.5, mid-deriv3, mid, tolerance) and
cubic_farthest_fit_inside(mid, mid+deriv3, (p2+p3)*.5, p3, tolerance))
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return cubic_farthest_fit_inside(
p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
@cython.locals(tolerance=cython.double, _2_3=cython.double)
@cython.locals(q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
@cython.locals(
q1=cython.complex,
c0=cython.complex,
c1=cython.complex,
c2=cython.complex,
c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance, _2_3=2 / 3):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance.
@ -294,10 +346,7 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
c3 = cubic[3]
c1 = c0 + (q1 - c0) * _2_3
c2 = c3 + (q1 - c3) * _2_3
if not cubic_farthest_fit_inside(0,
c1 - cubic[1],
c2 - cubic[2],
0, tolerance):
if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
return None
return c0, q1, c3
@ -305,8 +354,16 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double)
@cython.locals(i=cython.int)
@cython.locals(c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
@cython.locals(q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex)
@cython.locals(
c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
q0=cython.complex,
q1=cython.complex,
next_q1=cython.complex,
q2=cython.complex,
d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, _2_3=2 / 3):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
@ -347,7 +404,7 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
next_cubic = next(cubics)
next_q1 = cubic_approx_control(i / (n - 1), *next_cubic)
spline.append(next_q1)
q2 = (q1 + next_q1) * .5
q2 = (q1 + next_q1) * 0.5
else:
q2 = c3
@ -355,12 +412,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
d0 = d1
d1 = q2 - c3
if (abs(d1) > tolerance or
not cubic_farthest_fit_inside(d0,
q0 + (q1 - q0) * _2_3 - c1,
q2 + (q1 - q2) * _2_3 - c2,
d1,
tolerance)):
if abs(d1) > tolerance or not cubic_farthest_fit_inside(
d0, q0 + (q1 - q0) * _2_3 - c1, q2 + (q1 - q2) * _2_3 - c2, d1, tolerance
):
return None
spline.append(cubic[3])
@ -394,7 +448,6 @@ def curve_to_quadratic(curve, max_err):
raise ApproxNotFoundError(curve)
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
def curves_to_quadratic(curves, max_errors):
"""Return quadratic Bezier splines approximating the input cubic Beziers.
@ -448,5 +501,3 @@ def curves_to_quadratic(curves, max_errors):
return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves)
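For context, a minimal sketch of the conversion entry points defined in this module; the control points are arbitrary:

from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic

cubic = [(0, 0), (0, 100), (100, 200), (200, 200)]      # four (x, y) control points
print(curve_to_quadratic(cubic, 1.0))                    # one quadratic spline within max_err = 1.0
print(curves_to_quadratic([cubic, cubic], [1.0, 1.0]))   # interpolation-compatible splines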


@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""


@ -30,12 +30,15 @@ from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
UnequalZipLengthsError, IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError, IncompatibleGlyphsError,
IncompatibleFontsError)
UnequalZipLengthsError,
IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ['fonts_to_quadratic', 'font_to_quadratic']
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
@ -47,6 +50,8 @@ logger = logging.getLogger(__name__)
_zip = zip
def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility.
@ -69,27 +74,27 @@ class GetSegmentsPen(AbstractPen):
self.segments = []
def _add_segment(self, tag, *args):
if tag in ['move', 'line', 'qcurve', 'curve']:
if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1]
self.segments.append((tag, args))
def moveTo(self, pt):
self._add_segment('move', pt)
self._add_segment("move", pt)
def lineTo(self, pt):
self._add_segment('line', pt)
self._add_segment("line", pt)
def qCurveTo(self, *points):
self._add_segment('qcurve', self._last_pt, *points)
self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points):
self._add_segment('curve', self._last_pt, *points)
self._add_segment("curve", self._last_pt, *points)
def closePath(self):
self._add_segment('close')
self._add_segment("close")
def endPath(self):
self._add_segment('end')
self._add_segment("end")
def addComponent(self, glyphName, transformation):
pass
@ -122,17 +127,17 @@ def _set_segments(glyph, segments, reverse_direction):
if reverse_direction:
pen = ReverseContourPen(pen)
for tag, args in segments:
if tag == 'move':
if tag == "move":
pen.moveTo(*args)
elif tag == 'line':
elif tag == "line":
pen.lineTo(*args)
elif tag == 'curve':
elif tag == "curve":
pen.curveTo(*args[1:])
elif tag == 'qcurve':
elif tag == "qcurve":
pen.qCurveTo(*args[1:])
elif tag == 'close':
elif tag == "close":
pen.closePath()
elif tag == 'end':
elif tag == "end":
pen.endPath()
else:
raise AssertionError('Unhandled segment type "%s"' % tag)
@ -141,16 +146,16 @@ def _set_segments(glyph, segments, reverse_direction):
def _segments_to_quadratic(segments, max_err, stats):
"""Return quadratic approximations of cubic segments."""
assert all(s[0] == 'curve' for s in segments), 'Non-cubic given to convert'
assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"
new_points = curves_to_quadratic([s[1] for s in segments], max_err)
n = len(new_points[0])
assert all(len(s) == n for s in new_points[1:]), 'Converted incompatibly'
assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"
spline_length = str(n - 2)
stats[spline_length] = stats.get(spline_length, 0) + 1
return [('qcurve', p) for p in new_points]
return [("qcurve", p) for p in new_points]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
@ -176,7 +181,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments]
elif tag == 'curve':
elif tag == "curve":
segments = _segments_to_quadratic(segments, max_err, stats)
glyphs_modified = True
new_segments_by_location.append(segments)
@ -191,8 +196,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
return glyphs_modified
def glyphs_to_quadratic(
glyphs, max_err=None, reverse_direction=False, stats=None):
def glyphs_to_quadratic(glyphs, max_err=None, reverse_direction=False, stats=None):
"""Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@ -220,8 +224,14 @@ def glyphs_to_quadratic(
def fonts_to_quadratic(
fonts, max_err_em=None, max_err=None, reverse_direction=False,
stats=None, dump_stats=False, remember_curve_type=True):
fonts,
max_err_em=None,
max_err=None,
reverse_direction=False,
stats=None,
dump_stats=False,
remember_curve_type=True,
):
"""Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@ -258,7 +268,7 @@ def fonts_to_quadratic(
stats = {}
if max_err_em and max_err:
raise TypeError('Only one of max_err and max_err_em can be specified.')
raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR
@ -270,8 +280,7 @@ def fonts_to_quadratic(
if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em)
max_errors = [f.info.unitsPerEm * e
for f, e in zip(fonts, max_err_em)]
max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
@ -286,7 +295,8 @@ def fonts_to_quadratic(
cur_max_errors.append(error)
try:
modified |= _glyphs_to_quadratic(
glyphs, cur_max_errors, reverse_direction, stats)
glyphs, cur_max_errors, reverse_direction, stats
)
except IncompatibleGlyphsError as exc:
logger.error(exc)
glyph_errors[name] = exc
@ -296,8 +306,10 @@ def fonts_to_quadratic(
if modified and dump_stats:
spline_lengths = sorted(stats.keys())
logger.info('New spline lengths: %s' % (', '.join(
'%s: %d' % (l, stats[l]) for l in spline_lengths)))
logger.info(
"New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
)
if remember_curve_type:
for font in fonts:

File diff suppressed because it is too large.


@ -1,36 +1,258 @@
MacRoman = [
'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute',
'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1',
'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters',
'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US',
'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five',
'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore',
'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar',
'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute',
'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex',
'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex',
'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde',
'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave',
'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section',
'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark',
'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus',
'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation',
'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae',
'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin',
'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis',
'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash',
'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge',
'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft',
'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase',
'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute',
'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute',
'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi',
'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla',
'hungarumlaut', 'ogonek', 'caron'
"NUL",
"Eth",
"eth",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Yacute",
"yacute",
"HT",
"LF",
"Thorn",
"thorn",
"CR",
"Zcaron",
"zcaron",
"DLE",
"DC1",
"DC2",
"DC3",
"DC4",
"onehalf",
"onequarter",
"onesuperior",
"threequarters",
"threesuperior",
"twosuperior",
"brokenbar",
"minus",
"multiply",
"RS",
"US",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"DEL",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nbspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
]

View File

@ -1,48 +1,258 @@
StandardEncoding = [
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', 'space', 'exclam', 'quotedbl',
'numbersign', 'dollar', 'percent', 'ampersand',
'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus',
'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two',
'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
'colon', 'semicolon', 'less', 'equal', 'greater',
'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef',
'endash', 'dagger', 'daggerdbl', 'periodcentered',
'.notdef', 'paragraph', 'bullet', 'quotesinglbase',
'quotedblbase', 'quotedblright', 'guillemotright',
'ellipsis', 'perthousand', '.notdef', 'questiondown',
'.notdef', 'grave', 'acute', 'circumflex', 'tilde',
'macron', 'breve', 'dotaccent', 'dieresis', '.notdef',
'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek',
'caron', 'emdash', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
'.notdef', '.notdef', '.notdef', 'AE', '.notdef',
'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef',
'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef',
'.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef',
'.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef',
'lslash', 'oslash', 'oe', 'germandbls', '.notdef',
'.notdef', '.notdef', '.notdef'
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quoteright",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"quoteleft",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"exclamdown",
"cent",
"sterling",
"fraction",
"yen",
"florin",
"section",
"currency",
"quotesingle",
"quotedblleft",
"guillemotleft",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
".notdef",
"endash",
"dagger",
"daggerdbl",
"periodcentered",
".notdef",
"paragraph",
"bullet",
"quotesinglbase",
"quotedblbase",
"quotedblright",
"guillemotright",
"ellipsis",
"perthousand",
".notdef",
"questiondown",
".notdef",
"grave",
"acute",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"dieresis",
".notdef",
"ring",
"cedilla",
".notdef",
"hungarumlaut",
"ogonek",
"caron",
"emdash",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"AE",
".notdef",
"ordfeminine",
".notdef",
".notdef",
".notdef",
".notdef",
"Lslash",
"Oslash",
"OE",
"ordmasculine",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"ae",
".notdef",
".notdef",
".notdef",
"dotlessi",
".notdef",
".notdef",
"lslash",
"oslash",
"oe",
"germandbls",
".notdef",
".notdef",
".notdef",
".notdef",
]
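
As a quick sanity check of the table above: the list carries one glyph name per byte value, so it can be indexed directly by character code. A minimal sketch, assuming the list is the one shipped as fontTools.encodings.StandardEncoding:

from fontTools.encodings.StandardEncoding import StandardEncoding

assert len(StandardEncoding) == 256  # one name per byte value
assert StandardEncoding[0x00] == ".notdef"
assert StandardEncoding[0x20] == "space"
assert StandardEncoding[0x41] == "A"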

View File

@ -4,15 +4,17 @@ but missing from Python. See https://github.com/fonttools/fonttools/issues/236
import codecs
import encodings
class ExtendCodec(codecs.Codec):
class ExtendCodec(codecs.Codec):
def __init__(self, name, base_encoding, mapping):
self.name = name
self.base_encoding = base_encoding
self.mapping = mapping
self.reverse = {v: k for k, v in mapping.items()}
self.max_len = max(len(v) for v in mapping.values())
self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)
self.info = codecs.CodecInfo(
name=self.name, encode=self.encode, decode=self.decode
)
codecs.register_error(name, self.error)
def _map(self, mapper, output_type, exc_type, input, errors):
@ -33,10 +35,10 @@ class ExtendCodec(codecs.Codec):
input = input[pos:]
return out, length
def encode(self, input, errors='strict'):
def encode(self, input, errors="strict"):
return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
def decode(self, input, errors='strict'):
def decode(self, input, errors="strict"):
return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
def error(self, e):
@ -55,7 +57,9 @@ class ExtendCodec(codecs.Codec):
_extended_encodings = {
"x_mac_japanese_ttx": ("shift_jis", {
"x_mac_japanese_ttx": (
"shift_jis",
{
b"\xFC": chr(0x007C),
b"\x7E": chr(0x007E),
b"\x80": chr(0x005C),
@ -63,39 +67,50 @@ _extended_encodings = {
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_trad_chinese_ttx": ("big5", {
},
),
"x_mac_trad_chinese_ttx": (
"big5",
{
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_korean_ttx": ("euc_kr", {
},
),
"x_mac_korean_ttx": (
"euc_kr",
{
b"\x80": chr(0x00A0),
b"\x81": chr(0x20A9),
b"\x82": chr(0x2014),
b"\x83": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_simp_chinese_ttx": ("gb2312", {
},
),
"x_mac_simp_chinese_ttx": (
"gb2312",
{
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
},
),
}
_cache = {}
def search_function(name):
name = encodings.normalize_encoding(name) # Rather undocumented...
if name in _extended_encodings:
if name not in _cache:
base_encoding, mapping = _extended_encodings[name]
assert(name[-4:] == "_ttx")
assert name[-4:] == "_ttx"
# Python 2 didn't have any of the encodings that we are implementing
# in this file. Python 3 added aliases for the East Asian ones, mapping
# them "temporarily" to the same base encoding as us, with a comment
@ -116,4 +131,5 @@ def search_function(name):
return None
codecs.register(search_function)
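
A minimal sketch of exercising one of the codecs registered above; importing the module is what installs the search function, and the byte-to-character pairs come straight from the _extended_encodings mapping in this file:

import fontTools.encodings.codecs  # registers the "*_ttx" codecs as a side effect

# b"\x80" maps to backslash (0x005C) in the x_mac_japanese_ttx extension
assert b"\x80".decode("x_mac_japanese_ttx") == "\\"
# the encoder uses the reverse mapping, e.g. TRADE MARK SIGN -> b"\xFE"
assert "\u2122".encode("x_mac_japanese_ttx") == b"\xfe"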

View File

@ -768,8 +768,8 @@ class Builder(object):
varidx_map = store.optimize()
gdef.remap_device_varidxes(varidx_map)
if 'GPOS' in self.font:
self.font['GPOS'].table.remap_device_varidxes(varidx_map)
if "GPOS" in self.font:
self.font["GPOS"].table.remap_device_varidxes(varidx_map)
VariableScalar.clear_cache()
if any(
(
@ -1339,7 +1339,9 @@ class Builder(object):
# GSUB 5/6
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location)
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
lookup = self.get_lookup_(location, ChainContextSubstBuilder)
lookup.rules.append(
ChainContextualRule(
@ -1349,7 +1351,9 @@ class Builder(object):
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
if not mapping or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location)
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
# https://github.com/fonttools/fonttools/issues/512
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_single_subst(set(mapping.keys()))
@ -1377,8 +1381,12 @@ class Builder(object):
lookup = self.get_lookup_(location, SinglePosBuilder)
for glyphs, value in pos:
if not glyphs:
raise FeatureLibError("Empty glyph class in positioning rule", location)
otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
raise FeatureLibError(
"Empty glyph class in positioning rule", location
)
otValueRecord = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
for glyph in glyphs:
try:
lookup.add_pos(location, glyph, otValueRecord)
@ -1388,9 +1396,7 @@ class Builder(object):
# GPOS 2
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
if not glyphclass1 or not glyphclass2:
raise FeatureLibError(
"Empty glyph class in positioning rule", location
)
raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
@ -1458,7 +1464,9 @@ class Builder(object):
# GPOS 7/8
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
lookup = self.get_lookup_(location, ChainContextPosBuilder)
lookup.rules.append(
ChainContextualRule(
@ -1468,7 +1476,9 @@ class Builder(object):
def add_single_pos_chained_(self, location, prefix, suffix, pos):
if not pos or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
# https://github.com/fonttools/fonttools/issues/514
chain = self.get_lookup_(location, ChainContextPosBuilder)
targets = []
@ -1479,7 +1489,9 @@ class Builder(object):
if value is None:
subs.append(None)
continue
otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
otValue = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
if sub is None:
sub = self.get_chained_lookup_(location, SinglePosBuilder)
@ -1498,7 +1510,9 @@ class Builder(object):
for markClassDef in markClass.definitions:
for mark in markClassDef.glyphs.glyphSet():
if mark not in lookupBuilder.marks:
otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor)
otMarkAnchor = self.makeOpenTypeAnchor(
location, markClassDef.anchor
)
lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
else:
existingMarkClass = lookupBuilder.marks[mark][0]
@ -1592,9 +1606,13 @@ class Builder(object):
if not isinstance(getattr(anchor, dim), VariableScalar):
continue
if getattr(anchor, dim + "DeviceTable") is not None:
raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
raise FeatureLibError(
"Can't define a variable scalar in a non-variable font", location
)
varscalar = getattr(anchor, dim)
varscalar.axes = self.axes
default, index = varscalar.add_to_variation_store(self.varstorebuilder)
@ -1606,7 +1624,9 @@ class Builder(object):
deviceY = buildVarDevTable(index)
variable = True
otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
otlanchor = otl.buildAnchor(
anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY
)
if variable:
otlanchor.Format = 3
return otlanchor
@ -1617,7 +1637,6 @@ class Builder(object):
if not name.startswith("Reserved")
}
def makeOpenTypeValueRecord(self, location, v, pairPosContext):
"""ast.ValueRecord --> otBase.ValueRecord"""
if not v:
@ -1635,9 +1654,14 @@ class Builder(object):
otDeviceName = otName[0:4] + "Device"
feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
if getattr(v, feaDeviceName):
raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
raise FeatureLibError(
"Can't define a variable scalar in a non-variable font",
location,
)
val.axes = self.axes
default, index = val.add_to_variation_store(self.varstorebuilder)
vr[otName] = default

View File

@ -3,6 +3,7 @@ from typing import NamedTuple
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
class LookupDebugInfo(NamedTuple):
"""Information about where a lookup came from, to be embedded in a font"""

View File

@ -134,7 +134,8 @@ class Parser(object):
]
raise FeatureLibError(
"The following glyph names are referenced but are missing from the "
"glyph set:\n" + ("\n".join(error)), None
"glyph set:\n" + ("\n".join(error)),
None,
)
return self.doc_
@ -396,7 +397,8 @@ class Parser(object):
self.expect_symbol_("-")
range_end = self.expect_cid_()
self.check_glyph_name_in_glyph_set(
f"cid{range_start:05d}", f"cid{range_end:05d}",
f"cid{range_start:05d}",
f"cid{range_end:05d}",
)
glyphs.add_cid_range(
range_start,
@ -696,7 +698,9 @@ class Parser(object):
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
if not glyphs.glyphSet():
raise FeatureLibError("Empty glyph class in mark class definition", location)
raise FeatureLibError(
"Empty glyph class in mark class definition", location
)
anchor = self.parse_anchor_()
name = self.expect_class_name_()
self.expect_symbol_(";")

View File

@ -4,7 +4,11 @@
from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings
from fontTools.merge.cmap import (
computeMegaGlyphOrder,
computeMegaCmap,
renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
@ -57,7 +61,7 @@ class Merger(object):
fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
for font, fontfile in zip(fonts, fontfiles):
font._merger__fontfile = fontfile
font._merger__name = font['name'].getDebugName(4)
font._merger__name = font["name"].getDebugName(4)
return fonts
def merge(self, fontfiles):
@ -84,10 +88,10 @@ class Merger(object):
fonts = self._openFonts(fontfiles)
for font, glyphOrder in zip(fonts, glyphOrders):
font.setGlyphOrder(glyphOrder)
if 'CFF ' in font:
renameCFFCharStrings(self, glyphOrder, font['CFF '])
if "CFF " in font:
renameCFFCharStrings(self, glyphOrder, font["CFF "])
cmaps = [font['cmap'] for font in fonts]
cmaps = [font["cmap"] for font in fonts]
self.duplicateGlyphsPerFont = [{} for _ in fonts]
computeMegaCmap(self, cmaps)
@ -100,7 +104,7 @@ class Merger(object):
self.fonts = fonts
allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
allTags.remove('GlyphOrder')
allTags.remove("GlyphOrder")
for tag in allTags:
if tag in self.options.drop_tables:
@ -131,16 +135,21 @@ class Merger(object):
# Right now we don't use self at all. Will use in the future
# for options and logging.
allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
allKeys = set.union(
set(),
*(vars(table).keys() for table in tables if table is not NotImplemented),
)
for key in allKeys:
try:
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic['*']
mergeLogic = logic["*"]
except KeyError:
raise Exception("Don't know how to merge key %s of class %s" %
(key, returnTable.__class__.__name__))
raise Exception(
"Don't know how to merge key %s of class %s"
% (key, returnTable.__class__.__name__)
)
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
@ -161,11 +170,8 @@ class Merger(object):
font["OS/2"].recalcAvgCharWidth(font)
__all__ = [
'Options',
'Merger',
'main'
]
__all__ = ["Options", "Merger", "main"]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
@ -176,11 +182,11 @@ def main(args=None):
args = sys.argv[1:]
options = Options()
args = options.parse_opts(args, ignore_unknown=['output-file'])
outfile = 'merged.ttf'
args = options.parse_opts(args, ignore_unknown=["output-file"])
outfile = "merged.ttf"
fontfiles = []
for g in args:
if g.startswith('--output-file='):
if g.startswith("--output-file="):
outfile = g[14:]
continue
fontfiles.append(g)
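
For reference, the programmatic equivalent of the main() entry point above is roughly the following sketch (the file names are placeholders):

from fontTools.merge import Merger, Options

merger = Merger(options=Options())
merged = merger.merge(["Family-Latin.ttf", "Family-Arabic.ttf"])  # returns a TTFont
merged.save("merged.ttf")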

View File

@ -2,5 +2,5 @@ import sys
from fontTools.merge import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -12,19 +12,24 @@ log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs):
"""Returns a decorator function that adds a new method to one or
more classes."""
allowDefault = kwargs.get('allowDefaultTable', False)
allowDefault = kwargs.get("allowDefaultTable", False)
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done: continue # Support multiple names of a clazz
if clazz in done:
continue # Support multiple names of a clazz
done.append(clazz)
assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
assert method.__name__ not in clazz.__dict__, \
"Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
assert (
method.__name__ not in clazz.__dict__
), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def mergeObjects(lst):
lst = [item for item in lst if item is not NotImplemented]
if not lst:
@ -46,10 +51,11 @@ def mergeObjects(lst):
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic['*']
mergeLogic = logic["*"]
except KeyError:
raise Exception("Don't know how to merge key %s of class %s" %
(key, clazz.__name__))
raise Exception(
"Don't know how to merge key %s of class %s" % (key, clazz.__name__)
)
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
@ -60,9 +66,10 @@ def mergeObjects(lst):
return returnTable
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
if not hasattr(self, 'mergeMap'):
if not hasattr(self, "mergeMap"):
log.info("Don't know how to merge '%s'.", self.tableTag)
return NotImplemented
@ -72,5 +79,3 @@ def merge(self, m, tables):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)
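
The add_method decorator above simply grafts the decorated function onto each listed class and deliberately returns None so the name is not left at module level; a self-contained sketch of the same pattern with toy classes:

def add_method(*clazzes):
    def wrapper(method):
        for clazz in clazzes:
            assert method.__name__ not in clazz.__dict__
            setattr(clazz, method.__name__, method)
        return None  # keep the module namespace clean
    return wrapper


class TableA:
    pass


class TableB:
    pass


@add_method(TableA, TableB)
def merge(self, tables):
    return tables[0]


assert TableA().merge(["first", "second"]) == "first"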

View File

@ -27,9 +27,14 @@ def computeMegaGlyphOrder(merger, glyphOrders):
merger.glyphOrder = megaOrder = list(megaOrder.keys())
def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
advanceTolerance=.05,
advanceToleranceEmpty=.20):
def _glyphsAreSame(
glyphSet1,
glyphSet2,
glyph1,
glyph2,
advanceTolerance=0.05,
advanceToleranceEmpty=0.20,
):
pen1 = DecomposingRecordingPen(glyphSet1)
pen2 = DecomposingRecordingPen(glyphSet2)
g1 = glyphSet1[glyph1]
@ -43,11 +48,12 @@ def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
# TODO Warn if advances not the same but within tolerance.
if abs(g1.width - g2.width) > g1.width * tolerance:
return False
if hasattr(g1, 'height') and g1.height is not None:
if hasattr(g1, "height") and g1.height is not None:
if abs(g1.height - g2.height) > g1.height * tolerance:
return False
return True
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
@ -56,6 +62,7 @@ class _CmapUnicodePlatEncodings:
BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
def computeMegaCmap(merger, cmapTables):
"""Sets merger.cmap and merger.glyphOrder."""
@ -76,7 +83,10 @@ def computeMegaCmap(merger, cmapTables):
log.warning(
"Dropped cmap subtable from font '%s':\t"
"format %2s, platformID %2s, platEncID %2s",
fontIdx, subtable.format, subtable.platformID, subtable.platEncID
fontIdx,
subtable.format,
subtable.platformID,
subtable.platEncID,
)
if format12 is not None:
chosenCmapTables.append((format12, fontIdx))
@ -86,7 +96,7 @@ def computeMegaCmap(merger, cmapTables):
# Build the unicode mapping
merger.cmap = cmap = {}
fontIndexForGlyph = {}
glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None
glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None
for table, fontIdx in chosenCmapTables:
# handle duplicates
@ -113,7 +123,9 @@ def computeMegaCmap(merger, cmapTables):
# Char previously mapped to oldgid but oldgid is already remapped to a different
# gid, because of another Unicode character.
# TODO: Try harder to do something about these.
log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
log.warning(
"Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
)
def renameCFFCharStrings(merger, glyphOrder, cffTable):

View File

@ -17,14 +17,18 @@ def mergeLookupLists(lst):
# TODO Do smarter merge.
return sumLists(lst)
def mergeFeatures(lst):
assert lst
self = otTables.Feature()
self.FeatureParams = None
self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
self.LookupListIndex = mergeLookupLists(
[l.LookupListIndex for l in lst if l.LookupListIndex]
)
self.LookupCount = len(self.LookupListIndex)
return self
def mergeFeatureLists(lst):
d = {}
for l in lst:
@ -41,6 +45,7 @@ def mergeFeatureLists(lst):
ret.append(rec)
return ret
def mergeLangSyses(lst):
assert lst
@ -50,10 +55,13 @@ def mergeLangSyses(lst):
self = otTables.LangSys()
self.LookupOrder = None
self.ReqFeatureIndex = 0xFFFF
self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
self.FeatureIndex = mergeFeatureLists(
[l.FeatureIndex for l in lst if l.FeatureIndex]
)
self.FeatureCount = len(self.FeatureIndex)
return self
def mergeScripts(lst):
assert lst
@ -82,6 +90,7 @@ def mergeScripts(lst):
self.DefaultLangSys = None
return self
def mergeScriptRecords(lst):
d = {}
for l in lst:
@ -98,111 +107,124 @@ def mergeScriptRecords(lst):
ret.append(rec)
return ret
otTables.ScriptList.mergeMap = {
'ScriptCount': lambda lst: None, # TODO
'ScriptRecord': mergeScriptRecords,
"ScriptCount": lambda lst: None, # TODO
"ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
'BaseScriptCount': lambda lst: None, # TODO
"BaseScriptCount": lambda lst: None, # TODO
# TODO: Merge duplicate entries
'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
"BaseScriptRecord": lambda lst: sorted(
sumLists(lst), key=lambda s: s.BaseScriptTag
),
}
otTables.FeatureList.mergeMap = {
'FeatureCount': sum,
'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
"FeatureCount": sum,
"FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}
otTables.LookupList.mergeMap = {
'LookupCount': sum,
'Lookup': sumLists,
"LookupCount": sum,
"Lookup": sumLists,
}
otTables.Coverage.mergeMap = {
'Format': min,
'glyphs': sumLists,
"Format": min,
"glyphs": sumLists,
}
otTables.ClassDef.mergeMap = {
'Format': min,
'classDefs': sumDicts,
"Format": min,
"classDefs": sumDicts,
}
otTables.LigCaretList.mergeMap = {
'Coverage': mergeObjects,
'LigGlyphCount': sum,
'LigGlyph': sumLists,
"Coverage": mergeObjects,
"LigGlyphCount": sum,
"LigGlyph": sumLists,
}
otTables.AttachList.mergeMap = {
'Coverage': mergeObjects,
'GlyphCount': sum,
'AttachPoint': sumLists,
"Coverage": mergeObjects,
"GlyphCount": sum,
"AttachPoint": sumLists,
}
# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
'MarkSetTableFormat': equal,
'MarkSetCount': sum,
'Coverage': sumLists,
"MarkSetTableFormat": equal,
"MarkSetCount": sum,
"Coverage": sumLists,
}
otTables.Axis.mergeMap = {
'*': mergeObjects,
"*": mergeObjects,
}
# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
'BaseTagCount': sum,
'BaselineTag': sumLists,
"BaseTagCount": sum,
"BaselineTag": sumLists,
}
otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
'*': mergeObjects,
'Version': max,
otTables.GDEF.mergeMap = (
otTables.GSUB.mergeMap
) = (
otTables.GPOS.mergeMap
) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
"*": mergeObjects,
"Version": max,
}
ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
'tableTag': onlyExisting(equal), # XXX clean me up
'table': mergeObjects,
ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
"GSUB"
).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
"BASE"
).mergeMap = ttLib.getTableClass(
"JSTF"
).mergeMap = ttLib.getTableClass(
"MATH"
).mergeMap = {
"tableTag": onlyExisting(equal), # XXX clean me up
"table": mergeObjects,
}
@add_method(ttLib.getTableClass('GSUB'))
@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables):
assert len(tables) == len(m.duplicateGlyphsPerFont)
for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
if not dups: continue
if not dups:
continue
if table is None or table is NotImplemented:
log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
log.warning(
"Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
m.fonts[i]._merger__name,
dups,
)
continue
synthFeature = None
synthLookup = None
for script in table.table.ScriptList.ScriptRecord:
if script.ScriptTag == 'DFLT': continue # XXX
for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
if langsys is None: continue # XXX Create!
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
if script.ScriptTag == "DFLT":
continue # XXX
for langsys in [script.Script.DefaultLangSys] + [
l.LangSys for l in script.Script.LangSysRecord
]:
if langsys is None:
continue # XXX Create!
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
assert len(feature) <= 1
if feature:
feature = feature[0]
else:
if not synthFeature:
synthFeature = otTables.FeatureRecord()
synthFeature.FeatureTag = 'locl'
synthFeature.FeatureTag = "locl"
f = synthFeature.Feature = otTables.Feature()
f.FeatureParams = None
f.LookupCount = 0
@ -238,7 +260,9 @@ def merge(self, m, tables):
DefaultTable.merge(self, m, tables)
return self
@add_method(otTables.SingleSubst,
@add_method(
otTables.SingleSubst,
otTables.MultipleSubst,
otTables.AlternateSubst,
otTables.LigatureSubst,
@ -248,29 +272,32 @@ def merge(self, m, tables):
otTables.CursivePos,
otTables.MarkBasePos,
otTables.MarkLigPos,
otTables.MarkMarkPos)
otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap):
pass
# Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst,
@add_method(
otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos)
otTables.ChainContextPos,
)
def __merge_classify_context(self):
class ContextHelper(object):
def __init__(self, klass, Format):
if klass.__name__.endswith('Subst'):
Typ = 'Sub'
Type = 'Subst'
if klass.__name__.endswith("Subst"):
Typ = "Sub"
Type = "Subst"
else:
Typ = 'Pos'
Type = 'Pos'
if klass.__name__.startswith('Chain'):
Chain = 'Chain'
Typ = "Pos"
Type = "Pos"
if klass.__name__.startswith("Chain"):
Chain = "Chain"
else:
Chain = ''
Chain = ""
ChainTyp = Chain + Typ
self.Typ = Typ
@ -278,14 +305,14 @@ def __merge_classify_context(self):
self.Chain = Chain
self.ChainTyp = ChainTyp
self.LookupRecord = Type+'LookupRecord'
self.LookupRecord = Type + "LookupRecord"
if Format == 1:
self.Rule = ChainTyp+'Rule'
self.RuleSet = ChainTyp+'RuleSet'
self.Rule = ChainTyp + "Rule"
self.RuleSet = ChainTyp + "RuleSet"
elif Format == 2:
self.Rule = ChainTyp+'ClassRule'
self.RuleSet = ChainTyp+'ClassSet'
self.Rule = ChainTyp + "ClassRule"
self.RuleSet = ChainTyp + "ClassSet"
if self.Format not in [1, 2, 3]:
return None # Don't shoot the messenger; let it go
@ -297,99 +324,121 @@ def __merge_classify_context(self):
return self.__class__._merge__ContextHelpers[self.Format]
@add_method(otTables.ContextSubst,
@add_method(
otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos)
otTables.ChainContextPos,
)
def mapLookups(self, lookupMap):
c = self.__merge_classify_context()
if self.Format in [1, 2]:
for rs in getattr(self, c.RuleSet):
if not rs: continue
if not rs:
continue
for r in getattr(rs, c.Rule):
if not r: continue
if not r:
continue
for ll in getattr(r, c.LookupRecord):
if not ll: continue
if not ll:
continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
elif self.Format == 3:
for ll in getattr(self, c.LookupRecord):
if not ll: continue
if not ll:
continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
else:
assert 0, "unknown format: %s" % self.Format
@add_method(otTables.ExtensionSubst,
otTables.ExtensionPos)
@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
if self.Format == 1:
self.ExtSubTable.mapLookups(lookupMap)
else:
assert 0, "unknown format: %s" % self.Format
@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
for st in self.SubTable:
if not st: continue
if not st:
continue
st.mapLookups(lookupMap)
@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
for l in self.Lookup:
if not l: continue
if not l:
continue
l.mapLookups(lookupMap)
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
if self.LookupFlag & 0x0010:
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
for l in self.Lookup:
if not l: continue
if not l:
continue
l.mapMarkFilteringSets(markFilteringSetMap)
@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
for f in self.FeatureRecord:
if not f or not f.Feature: continue
if not f or not f.Feature:
continue
f.Feature.mapLookups(lookupMap)
@add_method(otTables.DefaultLangSys,
otTables.LangSys)
@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
if self.ReqFeatureIndex != 65535:
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
@add_method(otTables.Script)
def mapFeatures(self, featureMap):
if self.DefaultLangSys:
self.DefaultLangSys.mapFeatures(featureMap)
for l in self.LangSysRecord:
if not l or not l.LangSys: continue
if not l or not l.LangSys:
continue
l.LangSys.mapFeatures(featureMap)
@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
for s in self.ScriptRecord:
if not s or not s.Script: continue
if not s or not s.Script:
continue
s.Script.mapFeatures(featureMap)
def layoutPreMerge(font):
# Map indices to references
GDEF = font.get('GDEF')
GSUB = font.get('GSUB')
GPOS = font.get('GPOS')
GDEF = font.get("GDEF")
GSUB = font.get("GSUB")
GPOS = font.get("GPOS")
for t in [GSUB, GPOS]:
if not t: continue
if not t:
continue
if t.table.LookupList:
lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
@ -397,7 +446,9 @@ def layoutPreMerge(font):
t.table.FeatureList.mapLookups(lookupMap)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
markFilteringSetMap = {
i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
}
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
if t.table.FeatureList and t.table.ScriptList:
@ -406,15 +457,17 @@ def layoutPreMerge(font):
# TODO FeatureParams nameIDs
def layoutPostMerge(font):
# Map references back to indices
GDEF = font.get('GDEF')
GSUB = font.get('GSUB')
GPOS = font.get('GPOS')
GDEF = font.get("GDEF")
GSUB = font.get("GSUB")
GPOS = font.get("GPOS")
for t in [GSUB, GPOS]:
if not t: continue
if not t:
continue
if t.table.FeatureList and t.table.ScriptList:
@ -423,12 +476,18 @@ def layoutPostMerge(font):
t.table.ScriptList.mapFeatures(featureMap)
# Record used features.
featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
featureMap = AttendanceRecordingIdentityDict(
t.table.FeatureList.FeatureRecord
)
t.table.ScriptList.mapFeatures(featureMap)
usedIndices = featureMap.s
# Remove unused features
t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
t.table.FeatureList.FeatureRecord = [
f
for i, f in enumerate(t.table.FeatureList.FeatureRecord)
if i in usedIndices
]
# Map back to indices.
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
@ -450,7 +509,9 @@ def layoutPostMerge(font):
usedIndices = lookupMap.s
# Remove unused lookups
t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
t.table.LookupList.Lookup = [
l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
]
# Map back to indices.
lookupMap = NonhashableDict(t.table.LookupList.Lookup)
@ -460,7 +521,9 @@ def layoutPostMerge(font):
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
markFilteringSetMap = NonhashableDict(
GDEF.table.MarkGlyphSetsDef.Coverage
)
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
# TODO FeatureParams nameIDs

View File

@ -4,7 +4,6 @@
class Options(object):
class UnknownOptionError(Exception):
pass
@ -27,12 +26,12 @@ class Options(object):
opts = {}
for a in argv:
orig_a = a
if not a.startswith('--'):
if not a.startswith("--"):
ret.append(a)
continue
a = a[2:]
i = a.find('=')
op = '='
i = a.find("=")
op = "="
if i == -1:
if a.startswith("no-"):
k = a[3:]
@ -43,11 +42,11 @@ class Options(object):
else:
k = a[:i]
if k[-1] in "-+":
op = k[-1]+'=' # Ops is '-=' or '+=' now.
op = k[-1] + "=" # Ops is '-=' or '+=' now.
k = k[:-1]
v = a[i + 1 :]
ok = k
k = k.replace('-', '_')
k = k.replace("-", "_")
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
@ -61,16 +60,16 @@ class Options(object):
elif isinstance(ov, int):
v = int(v)
elif isinstance(ov, list):
vv = v.split(',')
if vv == ['']:
vv = v.split(",")
if vv == [""]:
vv = []
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
if op == '=':
if op == "=":
v = vv
elif op == '+=':
elif op == "+=":
v = ov
v.extend(vv)
elif op == '-=':
elif op == "-=":
v = ov
for x in vv:
if x in v:
@ -82,4 +81,3 @@ class Options(object):
self.set(**opts)
return ret
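
A sketch of how parse_opts is meant to be driven; the drop-tables attribute and the ignore_unknown value are assumptions based on how Merger and main() use this class elsewhere in the diff:

from fontTools.merge import Options

options = Options()
rest = options.parse_opts(
    ["--drop-tables=DSIG,EBDT", "In1.ttf", "In2.ttf"],
    ignore_unknown=["output-file"],
)
assert rest == ["In1.ttf", "In2.ttf"]  # non-option arguments are passed through
assert "DSIG" in options.drop_tables  # "=" replaces the list, "+=" would extend it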

View File

@ -13,21 +13,21 @@ import logging
log = logging.getLogger("fontTools.merge")
ttLib.getTableClass('maxp').mergeMap = {
'*': max,
'tableTag': equal,
'tableVersion': equal,
'numGlyphs': sum,
'maxStorage': first,
'maxFunctionDefs': first,
'maxInstructionDefs': first,
ttLib.getTableClass("maxp").mergeMap = {
"*": max,
"tableTag": equal,
"tableVersion": equal,
"numGlyphs": sum,
"maxStorage": first,
"maxFunctionDefs": first,
"maxInstructionDefs": first,
# TODO When we correctly merge hinting data, update these values:
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}
headFlagsMergeBitMap = {
'size': 16,
'*': bitwise_or,
"size": 16,
"*": bitwise_or,
1: bitwise_and, # Baseline at y = 0
2: bitwise_and, # lsb at x = 0
3: bitwise_and, # Force ppem to integer values. FIXME?
@ -39,64 +39,64 @@ headFlagsMergeBitMap = {
15: lambda bit: 0, # Always set to zero
}
ttLib.getTableClass('head').mergeMap = {
'tableTag': equal,
'tableVersion': max,
'fontRevision': max,
'checkSumAdjustment': lambda lst: 0, # We need *something* here
'magicNumber': equal,
'flags': mergeBits(headFlagsMergeBitMap),
'unitsPerEm': equal,
'created': current_time,
'modified': current_time,
'xMin': min,
'yMin': min,
'xMax': max,
'yMax': max,
'macStyle': first,
'lowestRecPPEM': max,
'fontDirectionHint': lambda lst: 2,
'indexToLocFormat': first,
'glyphDataFormat': equal,
ttLib.getTableClass("head").mergeMap = {
"tableTag": equal,
"tableVersion": max,
"fontRevision": max,
"checkSumAdjustment": lambda lst: 0, # We need *something* here
"magicNumber": equal,
"flags": mergeBits(headFlagsMergeBitMap),
"unitsPerEm": equal,
"created": current_time,
"modified": current_time,
"xMin": min,
"yMin": min,
"xMax": max,
"yMax": max,
"macStyle": first,
"lowestRecPPEM": max,
"fontDirectionHint": lambda lst: 2,
"indexToLocFormat": first,
"glyphDataFormat": equal,
}
ttLib.getTableClass('hhea').mergeMap = {
'*': equal,
'tableTag': equal,
'tableVersion': max,
'ascent': max,
'descent': min,
'lineGap': max,
'advanceWidthMax': max,
'minLeftSideBearing': min,
'minRightSideBearing': min,
'xMaxExtent': max,
'caretSlopeRise': first,
'caretSlopeRun': first,
'caretOffset': first,
'numberOfHMetrics': recalculate,
ttLib.getTableClass("hhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceWidthMax": max,
"minLeftSideBearing": min,
"minRightSideBearing": min,
"xMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfHMetrics": recalculate,
}
ttLib.getTableClass('vhea').mergeMap = {
'*': equal,
'tableTag': equal,
'tableVersion': max,
'ascent': max,
'descent': min,
'lineGap': max,
'advanceHeightMax': max,
'minTopSideBearing': min,
'minBottomSideBearing': min,
'yMaxExtent': max,
'caretSlopeRise': first,
'caretSlopeRun': first,
'caretOffset': first,
'numberOfVMetrics': recalculate,
ttLib.getTableClass("vhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceHeightMax": max,
"minTopSideBearing": min,
"minBottomSideBearing": min,
"yMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfVMetrics": recalculate,
}
os2FsTypeMergeBitMap = {
'size': 16,
'*': lambda bit: 0,
"size": 16,
"*": lambda bit: 0,
1: bitwise_or, # no embedding permitted
2: bitwise_and, # allow previewing and printing documents
3: bitwise_and, # allow editing documents
@ -104,6 +104,7 @@ os2FsTypeMergeBitMap = {
9: bitwise_or, # no embedding of outlines permitted
}
def mergeOs2FsType(lst):
lst = list(lst)
if all(item == 0 for item in lst):
@ -128,39 +129,40 @@ def mergeOs2FsType(lst):
return fsType
ttLib.getTableClass('OS/2').mergeMap = {
'*': first,
'tableTag': equal,
'version': max,
'xAvgCharWidth': first, # Will be recalculated at the end on the merged font
'fsType': mergeOs2FsType, # Will be overwritten
'panose': first, # FIXME: should really be the first Latin font
'ulUnicodeRange1': bitwise_or,
'ulUnicodeRange2': bitwise_or,
'ulUnicodeRange3': bitwise_or,
'ulUnicodeRange4': bitwise_or,
'fsFirstCharIndex': min,
'fsLastCharIndex': max,
'sTypoAscender': max,
'sTypoDescender': min,
'sTypoLineGap': max,
'usWinAscent': max,
'usWinDescent': max,
ttLib.getTableClass("OS/2").mergeMap = {
"*": first,
"tableTag": equal,
"version": max,
"xAvgCharWidth": first, # Will be recalculated at the end on the merged font
"fsType": mergeOs2FsType, # Will be overwritten
"panose": first, # FIXME: should really be the first Latin font
"ulUnicodeRange1": bitwise_or,
"ulUnicodeRange2": bitwise_or,
"ulUnicodeRange3": bitwise_or,
"ulUnicodeRange4": bitwise_or,
"fsFirstCharIndex": min,
"fsLastCharIndex": max,
"sTypoAscender": max,
"sTypoDescender": min,
"sTypoLineGap": max,
"usWinAscent": max,
"usWinDescent": max,
# Version 1
'ulCodePageRange1': onlyExisting(bitwise_or),
'ulCodePageRange2': onlyExisting(bitwise_or),
"ulCodePageRange1": onlyExisting(bitwise_or),
"ulCodePageRange2": onlyExisting(bitwise_or),
# Version 2, 3, 4
'sxHeight': onlyExisting(max),
'sCapHeight': onlyExisting(max),
'usDefaultChar': onlyExisting(first),
'usBreakChar': onlyExisting(first),
'usMaxContext': onlyExisting(max),
"sxHeight": onlyExisting(max),
"sCapHeight": onlyExisting(max),
"usDefaultChar": onlyExisting(first),
"usBreakChar": onlyExisting(first),
"usMaxContext": onlyExisting(max),
# version 5
'usLowerOpticalPointSize': onlyExisting(min),
'usUpperOpticalPointSize': onlyExisting(max),
"usLowerOpticalPointSize": onlyExisting(min),
"usUpperOpticalPointSize": onlyExisting(max),
}
@add_method(ttLib.getTableClass('OS/2'))
@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
DefaultTable.merge(self, m, tables)
if self.version < 2:
@ -174,41 +176,43 @@ def merge(self, m, tables):
self.fsType &= ~0x0004
return self
ttLib.getTableClass('post').mergeMap = {
'*': first,
'tableTag': equal,
'formatType': max,
'isFixedPitch': min,
'minMemType42': max,
'maxMemType42': lambda lst: 0,
'minMemType1': max,
'maxMemType1': lambda lst: 0,
'mapping': onlyExisting(sumDicts),
'extraNames': lambda lst: [],
ttLib.getTableClass("post").mergeMap = {
"*": first,
"tableTag": equal,
"formatType": max,
"isFixedPitch": min,
"minMemType42": max,
"maxMemType42": lambda lst: 0,
"minMemType1": max,
"maxMemType1": lambda lst: 0,
"mapping": onlyExisting(sumDicts),
"extraNames": lambda lst: [],
}
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
'tableTag': equal,
'metrics': sumDicts,
ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
"tableTag": equal,
"metrics": sumDicts,
}
ttLib.getTableClass('name').mergeMap = {
'tableTag': equal,
'names': first, # FIXME? Does mixing name records make sense?
ttLib.getTableClass("name").mergeMap = {
"tableTag": equal,
"names": first, # FIXME? Does mixing name records make sense?
}
ttLib.getTableClass('loca').mergeMap = {
'*': recalculate,
'tableTag': equal,
ttLib.getTableClass("loca").mergeMap = {
"*": recalculate,
"tableTag": equal,
}
ttLib.getTableClass('glyf').mergeMap = {
'tableTag': equal,
'glyphs': sumDicts,
'glyphOrder': sumLists,
ttLib.getTableClass("glyf").mergeMap = {
"tableTag": equal,
"glyphs": sumDicts,
"glyphOrder": sumLists,
}
@add_method(ttLib.getTableClass('glyf'))
@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
for i, table in enumerate(tables):
for g in table.glyphs.values():
@ -222,18 +226,20 @@ def merge(self, m, tables):
g.expand(table)
return DefaultTable.merge(self, m, tables)
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass('CFF '))
ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
lst
) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
if any(hasattr(table, "FDSelect") for table in tables):
raise NotImplementedError(
"Merging CID-keyed CFF tables is not supported yet"
)
raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")
for table in tables:
table.cff.desubroutinize()
@ -279,17 +285,18 @@ def merge(self, m, tables):
return newcff
@add_method(ttLib.getTableClass('cmap'))
@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
# TODO Handle format=14.
if not hasattr(m, 'cmap'):
if not hasattr(m, "cmap"):
computeMegaCmap(m, tables)
cmap = m.cmap
cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
self.tables = []
module = ttLib.getTableModule('cmap')
module = ttLib.getTableModule("cmap")
if len(cmapBmpOnly) != len(cmap):
# format-12 required.
cmapTable = module.cmap_classes[12](12)

View File

@ -1,5 +1,6 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
def is_Default_Ignorable(u):
# http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
#
@ -35,31 +36,43 @@ def is_Default_Ignorable(u):
# E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
# E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
return (
u == 0x00AD or # Cf SOFT HYPHEN
u == 0x034F or # Mn COMBINING GRAPHEME JOINER
u == 0x061C or # Cf ARABIC LETTER MARK
0x115F <= u <= 0x1160 or # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
0x17B4 <= u <= 0x17B5 or # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
0x180B <= u <= 0x180D or # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
u == 0x180E or # Cf MONGOLIAN VOWEL SEPARATOR
u == 0x180F or # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
0x200B <= u <= 0x200F or # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
0x202A <= u <= 0x202E or # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
0x2060 <= u <= 0x2064 or # Cf [5] WORD JOINER..INVISIBLE PLUS
u == 0x2065 or # Cn <reserved-2065>
0x2066 <= u <= 0x206F or # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
u == 0x3164 or # Lo HANGUL FILLER
0xFE00 <= u <= 0xFE0F or # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
u == 0xFEFF or # Cf ZERO WIDTH NO-BREAK SPACE
u == 0xFFA0 or # Lo HALFWIDTH HANGUL FILLER
0xFFF0 <= u <= 0xFFF8 or # Cn [9] <reserved-FFF0>..<reserved-FFF8>
0x1BCA0 <= u <= 0x1BCA3 or # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
0x1D173 <= u <= 0x1D17A or # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
u == 0xE0000 or # Cn <reserved-E0000>
u == 0xE0001 or # Cf LANGUAGE TAG
0xE0002 <= u <= 0xE001F or # Cn [30] <reserved-E0002>..<reserved-E001F>
0xE0020 <= u <= 0xE007F or # Cf [96] TAG SPACE..CANCEL TAG
0xE0080 <= u <= 0xE00FF or # Cn [128] <reserved-E0080>..<reserved-E00FF>
0xE0100 <= u <= 0xE01EF or # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
0xE01F0 <= u <= 0xE0FFF or # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
False)
u == 0x00AD
or u == 0x034F # Cf SOFT HYPHEN
or u == 0x061C # Mn COMBINING GRAPHEME JOINER
or 0x115F <= u <= 0x1160 # Cf ARABIC LETTER MARK
or 0x17B4 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
<= u
<= 0x17B5
or 0x180B # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
<= u
<= 0x180D
or u # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
== 0x180E
or u == 0x180F # Cf MONGOLIAN VOWEL SEPARATOR
or 0x200B <= u <= 0x200F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
or 0x202A <= u <= 0x202E # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
or 0x2060 # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
<= u
<= 0x2064
or u == 0x2065 # Cf [5] WORD JOINER..INVISIBLE PLUS
or 0x2066 <= u <= 0x206F # Cn <reserved-2065>
or u == 0x3164 # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
or 0xFE00 <= u <= 0xFE0F # Lo HANGUL FILLER
or u == 0xFEFF # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
or u == 0xFFA0 # Cf ZERO WIDTH NO-BREAK SPACE
or 0xFFF0 <= u <= 0xFFF8 # Lo HALFWIDTH HANGUL FILLER
or 0x1BCA0 <= u <= 0x1BCA3 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
or 0x1D173 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
<= u
<= 0x1D17A
or u == 0xE0000 # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
or u == 0xE0001 # Cn <reserved-E0000>
or 0xE0002 <= u <= 0xE001F # Cf LANGUAGE TAG
or 0xE0020 <= u <= 0xE007F # Cn [30] <reserved-E0002>..<reserved-E001F>
or 0xE0080 <= u <= 0xE00FF # Cf [96] TAG SPACE..CANCEL TAG
or 0xE0100 <= u <= 0xE01EF # Cn [128] <reserved-E0080>..<reserved-E00FF>
or 0xE01F0 # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
<= u
<= 0xE0FFF
or False # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
)
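
A few spot checks against the ranges listed above (a sketch; the import path is assumed to be the merge package's unicode helper):

from fontTools.merge.unicode import is_Default_Ignorable

assert is_Default_Ignorable(0x00AD)  # SOFT HYPHEN
assert is_Default_Ignorable(0x200B)  # ZERO WIDTH SPACE
assert is_Default_Ignorable(0xFE0F)  # VARIATION SELECTOR-16
assert not is_Default_Ignorable(0x0041)  # LATIN CAPITAL LETTER A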

View File

@ -14,6 +14,7 @@ log = logging.getLogger("fontTools.merge")
# General utility functions for merging values from different fonts
def equal(lst):
lst = list(lst)
t = iter(lst)
@ -21,25 +22,32 @@ def equal(lst):
assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
return first
def first(lst):
return next(iter(lst))
def recalculate(lst):
return NotImplemented
def current_time(lst):
return timestampNow()
def bitwise_and(lst):
return reduce(operator.and_, lst)
def bitwise_or(lst):
return reduce(operator.or_, lst)
def avg_int(lst):
lst = list(lst)
return sum(lst) // len(lst)
def onlyExisting(func):
"""Returns a filter func that when called with a list,
only calls func on the non-NotImplemented items of the list,
@ -52,29 +60,31 @@ def onlyExisting(func):
return wrapper
def sumLists(lst):
l = []
for item in lst:
l.extend(item)
return l
def sumDicts(lst):
d = {}
for item in lst:
d.update(item)
return d
def mergeBits(bitmap):
def mergeBits(bitmap):
def wrapper(lst):
lst = list(lst)
returnValue = 0
for bitNumber in range(bitmap['size']):
for bitNumber in range(bitmap["size"]):
try:
mergeLogic = bitmap[bitNumber]
except KeyError:
try:
mergeLogic = bitmap['*']
mergeLogic = bitmap["*"]
except KeyError:
raise Exception("Don't know how to merge bit %s" % bitNumber)
shiftedBit = 1 << bitNumber
@ -98,6 +108,7 @@ class AttendanceRecordingIdentityDict(object):
self.s.add(self.d[id(v)])
return v
class GregariousIdentityDict(object):
"""A dictionary-like object that welcomes guests without reservations and
adds them to the end of the guest list."""
@ -112,6 +123,7 @@ class GregariousIdentityDict(object):
self.l.append(v)
return v
class NonhashableDict(object):
"""A dictionary-like object mapping objects to values."""

View File

@ -23,6 +23,7 @@ def calcBounds(array):
ys = [y for x, y in array]
return min(xs), min(ys), max(xs), max(ys)
def calcIntBounds(array, round=otRound):
"""Calculate the integer bounding rectangle of a 2D points array.
@ -57,6 +58,7 @@ def updateBounds(bounds, p, min=min, max=max):
xMin, yMin, xMax, yMax = bounds
return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
def pointInRect(p, rect):
"""Test if a point is inside a bounding rectangle.
@ -72,6 +74,7 @@ def pointInRect(p, rect):
xMin, yMin, xMax, yMax = rect
return (xMin <= x <= xMax) and (yMin <= y <= yMax)
def pointsInRect(array, rect):
"""Determine which points are inside a bounding rectangle.
@ -88,6 +91,7 @@ def pointsInRect(array, rect):
xMin, yMin, xMax, yMax = rect
return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
def vectorLength(vector):
"""Calculate the length of the given vector.
@ -100,6 +104,7 @@ def vectorLength(vector):
x, y = vector
return math.sqrt(x**2 + y**2)
def asInt16(array):
"""Round a list of floats to 16-bit signed integers.
@ -130,6 +135,7 @@ def normRect(rect):
(xMin, yMin, xMax, yMax) = rect
return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)
def scaleRect(rect, x, y):
"""Scale a bounding box rectangle.
@ -145,6 +151,7 @@ def scaleRect(rect, x, y):
(xMin, yMin, xMax, yMax) = rect
return xMin * x, yMin * y, xMax * x, yMax * y
def offsetRect(rect, dx, dy):
"""Offset a bounding box rectangle.
@ -160,6 +167,7 @@ def offsetRect(rect, dx, dy):
(xMin, yMin, xMax, yMax) = rect
return xMin + dx, yMin + dy, xMax + dx, yMax + dy
def insetRect(rect, dx, dy):
"""Inset a bounding box rectangle on all sides.
@ -175,6 +183,7 @@ def insetRect(rect, dx, dy):
(xMin, yMin, xMax, yMax) = rect
return xMin + dx, yMin + dy, xMax - dx, yMax - dy
def sectRect(rect1, rect2):
"""Test for rectangle-rectangle intersection.
@ -191,12 +200,17 @@ def sectRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2),
min(xMax1, xMax2), min(yMax1, yMax2))
xMin, yMin, xMax, yMax = (
max(xMin1, xMin2),
max(yMin1, yMin2),
min(xMax1, xMax2),
min(yMax1, yMax2),
)
if xMin >= xMax or yMin >= yMax:
return False, (0, 0, 0, 0)
return True, (xMin, yMin, xMax, yMax)
def unionRect(rect1, rect2):
"""Determine union of bounding rectangles.
@ -211,10 +225,15 @@ def unionRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2),
max(xMax1, xMax2), max(yMax1, yMax2))
xMin, yMin, xMax, yMax = (
min(xMin1, xMin2),
min(yMin1, yMin2),
max(xMax1, xMax2),
max(yMax1, yMax2),
)
return (xMin, yMin, xMax, yMax)
def rectCenter(rect):
"""Determine rectangle center.
@ -228,6 +247,7 @@ def rectCenter(rect):
(xMin, yMin, xMax, yMax) = rect
return (xMin + xMax) / 2, (yMin + yMax) / 2
def rectArea(rect):
"""Determine rectangle area.
@ -241,6 +261,7 @@ def rectArea(rect):
(xMin, yMin, xMax, yMax) = rect
return (yMax - yMin) * (xMax - xMin)
def intRect(rect):
"""Round a rectangle to integer values.
@ -262,7 +283,6 @@ def intRect(rect):
class Vector(_Vector):
def __init__(self, *args, **kwargs):
warnings.warn(
"fontTools.misc.arrayTools.Vector has been deprecated, please use "
@ -373,7 +393,9 @@ def _test():
(0, 2, 4, 5)
"""
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
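
A few of the rectangle helpers above in action, using the public API as-is:

from fontTools.misc.arrayTools import calcBounds, sectRect, unionRect

assert calcBounds([(0, 10), (40, -5), (20, 30)]) == (0, -5, 40, 30)
assert unionRect((0, 0, 10, 10), (5, 5, 20, 20)) == (0, 0, 20, 20)
hit, overlap = sectRect((0, 0, 10, 10), (5, 5, 20, 20))
assert hit and overlap == (5, 5, 10, 10)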

View File

@ -168,4 +168,5 @@ def classify(list_of_sets, sort=True):
if __name__ == "__main__":
import sys, doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

View File

@ -6,7 +6,9 @@ import re
numberAddedRE = re.compile(r"#\d+$")
def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, suffix=""):
def makeOutputFileName(
input, outputDir=None, extension=None, overWrite=False, suffix=""
):
"""Generates a suitable file name for writing output.
Often tools will want to take a file, do some kind of transformation to it,
@ -44,6 +46,7 @@ def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, s
if not overWrite:
while os.path.exists(output):
output = os.path.join(
dirName, fileName + suffix + "#" + repr(n) + extension)
dirName, fileName + suffix + "#" + repr(n) + extension
)
n += 1
return output
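
Typical calls look like the following sketch (assuming the function shown is fontTools.misc.cliTools.makeOutputFileName):

from fontTools.misc.cliTools import makeOutputFileName

# "Font.ttf" -> "Font.woff2", or "Font#1.woff2" if that file already exists
print(makeOutputFileName("Font.ttf", extension=".woff2"))
# keep the extension, add a suffix: "Font.ttf" -> "Font-subset.ttf"
print(makeOutputFileName("Font.ttf", suffix="-subset"))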

View File

@ -10,9 +10,11 @@ We only define the symbols that we use. E.g. see fontTools.cu2qu
from types import SimpleNamespace
def _empty_decorator(x):
return x
compiled = False
for name in ("double", "complex", "int"):

View File

@ -1,7 +1,7 @@
"""Misc dict tools."""
__all__ = ['hashdict']
__all__ = ["hashdict"]
# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
@ -26,36 +26,54 @@ class hashdict(dict):
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join("{0}={1}".format(
str(i[0]),repr(i[1])) for i in self.__key()))
return "{0}({1})".format(
self.__class__.__name__,
", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),
)
def __hash__(self):
return hash(self.__key())
def __setitem__(self, key, value):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def __delitem__(self, key):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def clear(self):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def pop(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def popitem(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def setdefault(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def update(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
@ -63,4 +81,3 @@ class hashdict(dict):
result = hashdict(self)
dict.update(result, right)
return result
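
A minimal sketch of how the immutable hashdict above behaves; the module path fontTools.misc.dictTools is inferred from the docstring and the keys are arbitrary.

from fontTools.misc.dictTools import hashdict

d = hashdict(weight=400, width=5)
cache = {d: "Regular"}             # hashable, so usable as a dict key
merged = d + hashdict(italic=1)    # __add__ returns a new hashdict rather than mutating
# d["weight"] = 700                # would raise TypeError: item assignment is not supported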

View File

@ -21,6 +21,7 @@ def _decryptChar(cipher, R):
R = ((cipher + R) * 52845 + 22719) & 0xFFFF
return bytechr(plain), R
def _encryptChar(plain, R):
plain = byteord(plain)
cipher = ((plain ^ (R >> 8))) & 0xFF
@ -56,6 +57,7 @@ def decrypt(cipherstring, R):
plainstring = bytesjoin(plainList)
return plainstring, int(R)
def encrypt(plainstring, R):
r"""
Encrypts a string using the Type 1 encryption algorithm.
@ -99,10 +101,13 @@ def encrypt(plainstring, R):
def hexString(s):
import binascii
return binascii.hexlify(s)
def deHexString(h):
import binascii
h = bytesjoin(h.split())
return binascii.unhexlify(h)
@ -110,4 +115,5 @@ def deHexString(h):
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
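
A round-trip sketch for the eexec helpers above; 55665 is the customary Type 1 eexec key and the plaintext is arbitrary.

from fontTools.misc.eexec import encrypt, decrypt

cipher, _ = encrypt(b"/Private 12 dict", 55665)
plain, _ = decrypt(cipher, 55665)
assert plain == b"/Private 12 dict"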

View File

@ -6,13 +6,13 @@ import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = {
0: { # Unicode
0: 'utf_16_be',
1: 'utf_16_be',
2: 'utf_16_be',
3: 'utf_16_be',
4: 'utf_16_be',
5: 'utf_16_be',
6: 'utf_16_be',
0: "utf_16_be",
1: "utf_16_be",
2: "utf_16_be",
3: "utf_16_be",
4: "utf_16_be",
5: "utf_16_be",
6: "utf_16_be",
},
1: { # Macintosh
# See
@ -31,35 +31,36 @@ _encodingMap = {
38: "mac_latin2",
39: "mac_latin2",
40: "mac_latin2",
Ellipsis: 'mac_roman', # Other
Ellipsis: "mac_roman", # Other
},
1: 'x_mac_japanese_ttx',
2: 'x_mac_trad_chinese_ttx',
3: 'x_mac_korean_ttx',
6: 'mac_greek',
7: 'mac_cyrillic',
25: 'x_mac_simp_chinese_ttx',
29: 'mac_latin2',
35: 'mac_turkish',
37: 'mac_iceland',
1: "x_mac_japanese_ttx",
2: "x_mac_trad_chinese_ttx",
3: "x_mac_korean_ttx",
6: "mac_greek",
7: "mac_cyrillic",
25: "x_mac_simp_chinese_ttx",
29: "mac_latin2",
35: "mac_turkish",
37: "mac_iceland",
},
2: { # ISO
0: 'ascii',
1: 'utf_16_be',
2: 'latin1',
0: "ascii",
1: "utf_16_be",
2: "latin1",
},
3: { # Microsoft
0: 'utf_16_be',
1: 'utf_16_be',
2: 'shift_jis',
3: 'gb2312',
4: 'big5',
5: 'euc_kr',
6: 'johab',
10: 'utf_16_be',
0: "utf_16_be",
1: "utf_16_be",
2: "shift_jis",
3: "gb2312",
4: "big5",
5: "euc_kr",
6: "johab",
10: "utf_16_be",
},
}
def getEncoding(platformID, platEncID, langID, default=None):
"""Returns the Python encoding name for OpenType platformID/encodingID/langID
triplet. If encoding for these values is not known, by default None is

View File

@ -244,7 +244,8 @@ except ImportError:
except UnicodeDecodeError:
raise ValueError(
"Bytes strings can only contain ASCII characters. "
"Use unicode strings for non-ASCII characters.")
"Use unicode strings for non-ASCII characters."
)
except AttributeError:
_raise_serialization_error(s)
if s and _invalid_xml_string.search(s):
@ -425,9 +426,7 @@ except ImportError:
write(_escape_cdata(elem.tail))
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
def _escape_cdata(text):
# escape character data

View File

@ -133,6 +133,7 @@ def userNameToFileName(userName, existing=[], prefix="", suffix=""):
# finished
return fullName
def handleClash1(userName, existing=[], prefix="", suffix=""):
"""
existing should be a case-insensitive list
@ -167,7 +168,7 @@ def handleClash1(userName, existing=[], prefix="", suffix=""):
prefixLength = len(prefix)
suffixLength = len(suffix)
if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
l = (prefixLength + len(userName) + suffixLength + 15)
l = prefixLength + len(userName) + suffixLength + 15
sliceLength = maxFileNameLength - l
userName = userName[:sliceLength]
finalName = None
@ -189,6 +190,7 @@ def handleClash1(userName, existing=[], prefix="", suffix=""):
# finished
return finalName
def handleClash2(existing=[], prefix="", suffix=""):
"""
existing should be a case-insensitive list
@ -236,7 +238,9 @@ def handleClash2(existing=[], prefix="", suffix=""):
# finished
return finalName
if __name__ == "__main__":
import doctest
import sys
sys.exit(doctest.testmod().failed)
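
A hedged sketch of the UFO-style name mangling implemented above; the input name is arbitrary and the exact output is stated from memory, not from the hunks shown.

from fontTools.misc.filenames import userNameToFileName

name = userNameToFileName("Ae.alt", existing=[], suffix=".glif")
# capitals get a trailing underscore, giving something like "A_e.alt.glif";
# handleClash1/handleClash2 above only run when the result collides with `existing`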

View File

@ -231,8 +231,10 @@ def ensureVersionIsLong(value):
if value < 0x10000:
newValue = floatToFixed(value, 16)
log.warning(
"Table version value is a float: %.4f; "
"fix to use hex instead: 0x%08x", value, newValue)
"Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
value,
newValue,
)
value = newValue
return value

View File

@ -54,9 +54,10 @@ class LevelFormatter(logging.Formatter):
"""
def __init__(self, fmt=None, datefmt=None, style="%"):
if style != '%':
if style != "%":
raise ValueError(
"only '%' percent style is supported in both python 2 and 3")
"only '%' percent style is supported in both python 2 and 3"
)
if fmt is None:
fmt = DEFAULT_FORMATS
if isinstance(fmt, str):
@ -66,7 +67,7 @@ class LevelFormatter(logging.Formatter):
custom_formats = dict(fmt)
default_format = custom_formats.pop("*", None)
else:
raise TypeError('fmt must be a str or a dict of str: %r' % fmt)
raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
super(LevelFormatter, self).__init__(default_format, datefmt)
self.default_format = self._fmt
self.custom_formats = {}
@ -133,15 +134,18 @@ def configLogger(**kwargs):
handlers = kwargs.pop("handlers", None)
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
raise ValueError(
"'stream' and 'filename' should not be " "specified together"
)
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
raise ValueError(
"'stream' or 'filename' should not be "
"specified together with 'handlers'"
)
if handlers is None:
filename = kwargs.pop("filename", None)
mode = kwargs.pop("filemode", 'a')
mode = kwargs.pop("filemode", "a")
if filename:
h = logging.FileHandler(filename, mode)
else:
@ -159,7 +163,7 @@ def configLogger(**kwargs):
fs = kwargs.pop("format", None)
dfs = kwargs.pop("datefmt", None)
# XXX: '%' is the only format style supported on both py2 and 3
style = kwargs.pop("style", '%')
style = kwargs.pop("style", "%")
fmt = LevelFormatter(fs, dfs, style)
filters = kwargs.pop("filters", [])
for h in handlers:
@ -177,8 +181,8 @@ def configLogger(**kwargs):
if level is not None:
logger.setLevel(level)
if kwargs:
keys = ', '.join(kwargs.keys())
raise ValueError('Unrecognised argument(s): %s' % keys)
keys = ", ".join(kwargs.keys())
raise ValueError("Unrecognised argument(s): %s" % keys)
def _resetExistingLoggers(parent="root"):
@ -287,10 +291,9 @@ class Timer(object):
def __init__(self, logger=None, msg=None, level=None, start=None):
self.reset(start)
if logger is None:
for arg in ('msg', 'level'):
for arg in ("msg", "level"):
if locals().get(arg) is not None:
raise ValueError(
"'%s' can't be specified without a 'logger'" % arg)
raise ValueError("'%s' can't be specified without a 'logger'" % arg)
self.logger = logger
self.level = level if level is not None else TIME_LEVEL
self.msg = msg
@ -350,7 +353,7 @@ class Timer(object):
message = self.formatTime(self.msg, time)
# Allow log handlers to see the individual parts to facilitate things
# like a server accumulating aggregate stats.
msg_parts = { 'msg': self.msg, 'time': time }
msg_parts = {"msg": self.msg, "time": time}
self.logger.log(self.level, message, msg_parts)
def __call__(self, func_or_msg=None, **kwargs):
@ -370,6 +373,7 @@ class Timer(object):
def wrapper(*args, **kwds):
with self:
return func(*args, **kwds)
return wrapper
else:
msg = func_or_msg or kwargs.get("msg")
@ -425,8 +429,7 @@ class ChannelsFilter(logging.Filter):
nlen = self.lengths[name]
if name == record.name:
return True
elif (record.name.find(name, 0, nlen) == 0
and record.name[nlen] == "."):
elif record.name.find(name, 0, nlen) == 0 and record.name[nlen] == ".":
return True
return False
@ -465,6 +468,7 @@ class CapturingLogHandler(logging.Handler):
def assertRegex(self, regexp, msg=None):
import re
pattern = re.compile(regexp)
for r in self.records:
if pattern.search(r.getMessage()):
@ -505,32 +509,35 @@ class LogMixin(object):
@property
def log(self):
if not hasattr(self, "_log"):
name = ".".join(
(self.__class__.__module__, self.__class__.__name__)
)
name = ".".join((self.__class__.__module__, self.__class__.__name__))
self._log = logging.getLogger(name)
return self._log
def deprecateArgument(name, msg, category=UserWarning):
"""Raise a warning about deprecated function argument 'name'."""
warnings.warn(
"%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
def deprecateFunction(msg, category=UserWarning):
"""Decorator to raise a warning when a deprecated function is called."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
"%r is deprecated; %s" % (func.__name__, msg),
category=category, stacklevel=2)
category=category,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
return decorator
if __name__ == "__main__":
import doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
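
A short sketch of the logging utilities reformatted above; the logger name and message are made up.

import logging
from fontTools.misc.loggingTools import configLogger, Timer

configLogger(level="INFO")        # fontTools-flavoured logging setup
log = logging.getLogger("myTool")

with Timer(log, "do the slow thing", level=logging.INFO):
    total = sum(range(1_000_000))  # timed block; Timer logs the elapsed time on exit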

View File

@ -1,4 +1,5 @@
from fontTools.misc.textTools import Tag, bytesjoin, strjoin
try:
import xattr
except ImportError:
@ -24,7 +25,7 @@ def getMacCreatorAndType(path):
"""
if xattr is not None:
try:
finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo')
finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
except (KeyError, IOError):
pass
else:
@ -48,7 +49,8 @@ def setMacCreatorAndType(path, fileCreator, fileType):
"""
if xattr is not None:
from fontTools.misc.textTools import pad
if not all(len(s) == 4 for s in (fileCreator, fileType)):
raise TypeError('arg must be string of 4 chars')
raise TypeError("arg must be string of 4 chars")
finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo)
xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)

View File

@ -23,6 +23,7 @@ class ResourceReader(MutableMapping):
representing all the resources of a certain type.
"""
def __init__(self, fileOrPath):
"""Open a file
@ -31,7 +32,7 @@ class ResourceReader(MutableMapping):
``os.PathLike`` object, or a string.
"""
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
if hasattr(fileOrPath, "read"):
self.file = fileOrPath
else:
try:
@ -48,7 +49,7 @@ class ResourceReader(MutableMapping):
def openResourceFork(path):
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
with open(path + "/..namedfork/rsrc", "rb") as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
@ -56,7 +57,7 @@ class ResourceReader(MutableMapping):
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
with open(path, "rb") as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
@ -73,13 +74,13 @@ class ResourceReader(MutableMapping):
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
raise ResourceError("Failed to seek offset (reached EOF)")
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
raise ResourceError("Cannot read resource (not enough data)")
return data
def _readHeaderAndMap(self):
@ -96,15 +97,15 @@ class ResourceReader(MutableMapping):
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
(self.numTypes,) = struct.unpack(">H", numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resType = tostr(item["type"], encoding="mac-roman")
refListOffset = absTypeListOffset + item["refListOffset"]
numRes = item["numRes"] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
@ -174,7 +175,7 @@ class ResourceReader(MutableMapping):
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
name = tostr(name, encoding="mac-roman")
for res in self.get(resType, []):
if res.name == name:
return res
@ -196,8 +197,9 @@ class Resource(object):
attr: attributes.
"""
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
def __init__(
self, resType=None, resData=None, resID=None, resName=None, resAttr=None
):
self.type = resType
self.data = resData
self.id = resID
@ -207,16 +209,16 @@ class Resource(object):
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
(self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
(dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
(nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
(name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding="mac-roman")
ResourceForkHeader = """

View File

@ -353,7 +353,9 @@ def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
return el
def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
def _dict_element(
d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("dict")
items = d.items()
if ctx.sort_keys:
@ -371,7 +373,9 @@ def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etre
return el
def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
def _array_element(
array: Sequence[PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("array")
if len(array) == 0:
return el

View File

@ -3,7 +3,10 @@ CFF dictionary data and Type1/Type2 CharStrings.
"""
from fontTools.misc.fixedTools import (
fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat,
fixedToFloat,
floatToFixed,
floatToFixedToStr,
strToFixedToFloat,
)
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin
from fontTools.pens.boundsPen import BoundsPen
@ -27,44 +30,52 @@ def read_operator(self, b0, data, index):
value = self.handle_operator(operator)
return value, index
def read_byte(self, b0, data, index):
return b0 - 139, index
def read_smallInt1(self, b0, data, index):
b1 = byteord(data[index])
return (b0 - 247) * 256 + b1 + 108, index + 1
def read_smallInt2(self, b0, data, index):
b1 = byteord(data[index])
return -(b0 - 251) * 256 - b1 - 108, index + 1
def read_shortInt(self, b0, data, index):
value, = struct.unpack(">h", data[index:index+2])
(value,) = struct.unpack(">h", data[index : index + 2])
return value, index + 2
def read_longInt(self, b0, data, index):
value, = struct.unpack(">l", data[index:index+4])
(value,) = struct.unpack(">l", data[index : index + 4])
return value, index + 4
def read_fixed1616(self, b0, data, index):
value, = struct.unpack(">l", data[index:index+4])
(value,) = struct.unpack(">l", data[index : index + 4])
return fixedToFloat(value, precisionBits=16), index + 4
def read_reserved(self, b0, data, index):
assert NotImplementedError
return NotImplemented, index
def read_realNumber(self, b0, data, index):
number = ''
number = ""
while True:
b = byteord(data[index])
index = index + 1
nibble0 = (b & 0xf0) >> 4
nibble1 = b & 0x0f
if nibble0 == 0xf:
nibble0 = (b & 0xF0) >> 4
nibble1 = b & 0x0F
if nibble0 == 0xF:
break
number = number + realNibbles[nibble0]
if nibble1 == 0xf:
if nibble1 == 0xF:
break
number = number + realNibbles[nibble1]
return float(number), index
@ -88,8 +99,23 @@ cffDictOperandEncoding[30] = read_realNumber
cffDictOperandEncoding[255] = read_reserved
realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'.', 'E', 'E-', None, '-']
realNibbles = [
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
".",
"E",
"E-",
None,
"-",
]
realNibblesDict = {v: i for i, v in enumerate(realNibbles)}
maxOpStack = 193
@ -112,62 +138,63 @@ def buildOperatorDict(operatorList):
t2Operators = [
# opcode name
(1, 'hstem'),
(3, 'vstem'),
(4, 'vmoveto'),
(5, 'rlineto'),
(6, 'hlineto'),
(7, 'vlineto'),
(8, 'rrcurveto'),
(10, 'callsubr'),
(11, 'return'),
(14, 'endchar'),
(15, 'vsindex'),
(16, 'blend'),
(18, 'hstemhm'),
(19, 'hintmask'),
(20, 'cntrmask'),
(21, 'rmoveto'),
(22, 'hmoveto'),
(23, 'vstemhm'),
(24, 'rcurveline'),
(25, 'rlinecurve'),
(26, 'vvcurveto'),
(27, 'hhcurveto'),
(1, "hstem"),
(3, "vstem"),
(4, "vmoveto"),
(5, "rlineto"),
(6, "hlineto"),
(7, "vlineto"),
(8, "rrcurveto"),
(10, "callsubr"),
(11, "return"),
(14, "endchar"),
(15, "vsindex"),
(16, "blend"),
(18, "hstemhm"),
(19, "hintmask"),
(20, "cntrmask"),
(21, "rmoveto"),
(22, "hmoveto"),
(23, "vstemhm"),
(24, "rcurveline"),
(25, "rlinecurve"),
(26, "vvcurveto"),
(27, "hhcurveto"),
# (28, 'shortint'), # not really an operator
(29, 'callgsubr'),
(30, 'vhcurveto'),
(31, 'hvcurveto'),
    ((12, 0), 'ignore'), # dotsection. Yes, there are a few very early OTF/CFF
(29, "callgsubr"),
(30, "vhcurveto"),
(31, "hvcurveto"),
    ((12, 0), "ignore"), # dotsection. Yes, there are a few very early OTF/CFF
# fonts with this deprecated operator. Just ignore it.
((12, 3), 'and'),
((12, 4), 'or'),
((12, 5), 'not'),
((12, 8), 'store'),
((12, 9), 'abs'),
((12, 10), 'add'),
((12, 11), 'sub'),
((12, 12), 'div'),
((12, 13), 'load'),
((12, 14), 'neg'),
((12, 15), 'eq'),
((12, 18), 'drop'),
((12, 20), 'put'),
((12, 21), 'get'),
((12, 22), 'ifelse'),
((12, 23), 'random'),
((12, 24), 'mul'),
((12, 26), 'sqrt'),
((12, 27), 'dup'),
((12, 28), 'exch'),
((12, 29), 'index'),
((12, 30), 'roll'),
((12, 34), 'hflex'),
((12, 35), 'flex'),
((12, 36), 'hflex1'),
((12, 37), 'flex1'),
((12, 3), "and"),
((12, 4), "or"),
((12, 5), "not"),
((12, 8), "store"),
((12, 9), "abs"),
((12, 10), "add"),
((12, 11), "sub"),
((12, 12), "div"),
((12, 13), "load"),
((12, 14), "neg"),
((12, 15), "eq"),
((12, 18), "drop"),
((12, 20), "put"),
((12, 21), "get"),
((12, 22), "ifelse"),
((12, 23), "random"),
((12, 24), "mul"),
((12, 26), "sqrt"),
((12, 27), "dup"),
((12, 28), "exch"),
((12, 29), "index"),
((12, 30), "roll"),
((12, 34), "hflex"),
((12, 35), "flex"),
((12, 36), "hflex1"),
((12, 37), "flex1"),
]
def getIntEncoder(format):
if format == "cff":
fourByteOp = bytechr(29)
@ -177,8 +204,13 @@ def getIntEncoder(format):
assert format == "t2"
fourByteOp = None
def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr,
pack=struct.pack, unpack=struct.unpack):
def encodeInt(
value,
fourByteOp=fourByteOp,
bytechr=bytechr,
pack=struct.pack,
unpack=struct.unpack,
):
if -107 <= value <= 107:
code = bytechr(value + 139)
elif 108 <= value <= 1131:
@ -200,9 +232,11 @@ def getIntEncoder(format):
# distinguish anymore between small ints that were supposed to
# be small fixed numbers and small ints that were just small
# ints. Hence the warning.
log.warning("4-byte T2 number got passed to the "
log.warning(
"4-byte T2 number got passed to the "
"IntType handler. This should happen only when reading in "
"old XML files.\n")
"old XML files.\n"
)
code = bytechr(255) + pack(">l", value)
else:
code = fourByteOp + pack(">l", value)
@ -215,6 +249,7 @@ encodeIntCFF = getIntEncoder("cff")
encodeIntT1 = getIntEncoder("t1")
encodeIntT2 = getIntEncoder("t2")
def encodeFixed(f, pack=struct.pack):
"""For T2 only"""
value = floatToFixed(f, precisionBits=16)
@ -224,7 +259,8 @@ def encodeFixed(f, pack=struct.pack):
return b"\xff" + pack(">l", value) # encode the entire fixed point value
realZeroBytes = bytechr(30) + bytechr(0xf)
realZeroBytes = bytechr(30) + bytechr(0xF)
def encodeFloat(f):
# For CFF only, used in cffLib
@ -249,20 +285,20 @@ def encodeFloat(f):
elif c2 == "+":
s = s[1:]
nibbles.append(realNibblesDict[c])
nibbles.append(0xf)
nibbles.append(0xF)
if len(nibbles) % 2:
nibbles.append(0xf)
nibbles.append(0xF)
d = bytechr(30)
for i in range(0, len(nibbles), 2):
d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1])
return d
class CharStringCompileError(Exception): pass
class CharStringCompileError(Exception):
pass
class SimpleT2Decompiler(object):
def __init__(self, localSubrs, globalSubrs, private=None, blender=None):
self.localSubrs = localSubrs
self.localBias = calcSubrBias(localSubrs)
@ -346,10 +382,13 @@ class SimpleT2Decompiler(object):
def op_hstem(self, index):
self.countHints()
def op_vstem(self, index):
self.countHints()
def op_hstemhm(self, index):
self.countHints()
def op_vstemhm(self, index):
self.countHints()
@ -369,46 +408,67 @@ class SimpleT2Decompiler(object):
# misc
def op_and(self, index):
raise NotImplementedError
def op_or(self, index):
raise NotImplementedError
def op_not(self, index):
raise NotImplementedError
def op_store(self, index):
raise NotImplementedError
def op_abs(self, index):
raise NotImplementedError
def op_add(self, index):
raise NotImplementedError
def op_sub(self, index):
raise NotImplementedError
def op_div(self, index):
raise NotImplementedError
def op_load(self, index):
raise NotImplementedError
def op_neg(self, index):
raise NotImplementedError
def op_eq(self, index):
raise NotImplementedError
def op_drop(self, index):
raise NotImplementedError
def op_put(self, index):
raise NotImplementedError
def op_get(self, index):
raise NotImplementedError
def op_ifelse(self, index):
raise NotImplementedError
def op_random(self, index):
raise NotImplementedError
def op_mul(self, index):
raise NotImplementedError
def op_sqrt(self, index):
raise NotImplementedError
def op_dup(self, index):
raise NotImplementedError
def op_exch(self, index):
raise NotImplementedError
def op_index(self, index):
raise NotImplementedError
def op_roll(self, index):
raise NotImplementedError
@ -418,7 +478,9 @@ class SimpleT2Decompiler(object):
numBlends = self.pop()
numOps = numBlends * (self.numRegions + 1)
if self.blender is None:
del self.operandStack[-(numOps-numBlends):] # Leave the default operands on the stack.
del self.operandStack[
-(numOps - numBlends) :
] # Leave the default operands on the stack.
else:
argi = len(self.operandStack) - numOps
end_args = tuplei = argi + numBlends
@ -439,37 +501,44 @@ class SimpleT2Decompiler(object):
t1Operators = [
# opcode name
(1, 'hstem'),
(3, 'vstem'),
(4, 'vmoveto'),
(5, 'rlineto'),
(6, 'hlineto'),
(7, 'vlineto'),
(8, 'rrcurveto'),
(9, 'closepath'),
(10, 'callsubr'),
(11, 'return'),
(13, 'hsbw'),
(14, 'endchar'),
(21, 'rmoveto'),
(22, 'hmoveto'),
(30, 'vhcurveto'),
(31, 'hvcurveto'),
((12, 0), 'dotsection'),
((12, 1), 'vstem3'),
((12, 2), 'hstem3'),
((12, 6), 'seac'),
((12, 7), 'sbw'),
((12, 12), 'div'),
((12, 16), 'callothersubr'),
((12, 17), 'pop'),
((12, 33), 'setcurrentpoint'),
(1, "hstem"),
(3, "vstem"),
(4, "vmoveto"),
(5, "rlineto"),
(6, "hlineto"),
(7, "vlineto"),
(8, "rrcurveto"),
(9, "closepath"),
(10, "callsubr"),
(11, "return"),
(13, "hsbw"),
(14, "endchar"),
(21, "rmoveto"),
(22, "hmoveto"),
(30, "vhcurveto"),
(31, "hvcurveto"),
((12, 0), "dotsection"),
((12, 1), "vstem3"),
((12, 2), "hstem3"),
((12, 6), "seac"),
((12, 7), "sbw"),
((12, 12), "div"),
((12, 16), "callothersubr"),
((12, 17), "pop"),
((12, 33), "setcurrentpoint"),
]
class T2WidthExtractor(SimpleT2Decompiler):
def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None):
def __init__(
self,
localSubrs,
globalSubrs,
nominalWidthX,
defaultWidthX,
private=None,
blender=None,
):
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender)
self.nominalWidthX = nominalWidthX
self.defaultWidthX = defaultWidthX
@ -484,7 +553,9 @@ class T2WidthExtractor(SimpleT2Decompiler):
if not self.gotWidth:
if evenOdd ^ (len(args) % 2):
# For CFF2 charstrings, this should never happen
assert self.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
assert (
self.defaultWidthX is not None
), "CFF2 CharStrings must not have an initial width value"
self.width = self.nominalWidthX + args[0]
args = args[1:]
else:
@ -510,10 +581,25 @@ class T2WidthExtractor(SimpleT2Decompiler):
class T2OutlineExtractor(T2WidthExtractor):
def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None):
def __init__(
self,
pen,
localSubrs,
globalSubrs,
nominalWidthX,
defaultWidthX,
private=None,
blender=None,
):
T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender)
self,
localSubrs,
globalSubrs,
nominalWidthX,
defaultWidthX,
private,
blender,
)
self.pen = pen
self.subrLevel = 0
@ -586,17 +672,21 @@ class T2OutlineExtractor(T2WidthExtractor):
def op_rmoveto(self, index):
self.endPath()
self.rMoveTo(self.popallWidth())
def op_hmoveto(self, index):
self.endPath()
self.rMoveTo((self.popallWidth(1)[0], 0))
def op_vmoveto(self, index):
self.endPath()
self.rMoveTo((0, self.popallWidth(1)[0]))
def op_endchar(self, index):
self.endPath()
args = self.popallWidth()
if args:
from fontTools.encodings.StandardEncoding import StandardEncoding
            # endchar can do seac accent building; the T2 spec says it's deprecated,
# but recent software that shall remain nameless does output it.
adx, ady, bchar, achar = args
@ -616,6 +706,7 @@ class T2OutlineExtractor(T2WidthExtractor):
def op_hlineto(self, index):
self.alternatingLineto(1)
def op_vlineto(self, index):
self.alternatingLineto(0)
@ -626,7 +717,14 @@ class T2OutlineExtractor(T2WidthExtractor):
"""{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
args = self.popall()
for i in range(0, len(args), 6):
dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
(
dxa,
dya,
dxb,
dyb,
dxc,
dyc,
) = args[i : i + 6]
self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
def op_rcurveline(self, index):
@ -701,10 +799,12 @@ class T2OutlineExtractor(T2WidthExtractor):
dy5 = -dy2
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
def op_flex(self, index):
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
def op_hflex1(self, index):
dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
dy3 = dy4 = 0
@ -712,6 +812,7 @@ class T2OutlineExtractor(T2WidthExtractor):
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
def op_flex1(self, index):
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
dx = dx1 + dx2 + dx3 + dx4 + dx5
@ -728,18 +829,25 @@ class T2OutlineExtractor(T2WidthExtractor):
# misc
def op_and(self, index):
raise NotImplementedError
def op_or(self, index):
raise NotImplementedError
def op_not(self, index):
raise NotImplementedError
def op_store(self, index):
raise NotImplementedError
def op_abs(self, index):
raise NotImplementedError
def op_add(self, index):
raise NotImplementedError
def op_sub(self, index):
raise NotImplementedError
def op_div(self, index):
num2 = self.pop()
num1 = self.pop()
@ -749,32 +857,46 @@ class T2OutlineExtractor(T2WidthExtractor):
self.push(d1)
else:
self.push(d2)
def op_load(self, index):
raise NotImplementedError
def op_neg(self, index):
raise NotImplementedError
def op_eq(self, index):
raise NotImplementedError
def op_drop(self, index):
raise NotImplementedError
def op_put(self, index):
raise NotImplementedError
def op_get(self, index):
raise NotImplementedError
def op_ifelse(self, index):
raise NotImplementedError
def op_random(self, index):
raise NotImplementedError
def op_mul(self, index):
raise NotImplementedError
def op_sqrt(self, index):
raise NotImplementedError
def op_dup(self, index):
raise NotImplementedError
def op_exch(self, index):
raise NotImplementedError
def op_index(self, index):
raise NotImplementedError
def op_roll(self, index):
raise NotImplementedError
@ -813,8 +935,8 @@ class T2OutlineExtractor(T2WidthExtractor):
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
return args
class T1OutlineExtractor(T2OutlineExtractor):
class T1OutlineExtractor(T2OutlineExtractor):
def __init__(self, pen, subrs):
self.pen = pen
self.subrs = subrs
@ -846,6 +968,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
return
self.endPath()
self.rMoveTo(self.popall())
def op_hmoveto(self, index):
if self.flexing:
# We must add a parameter to the stack if we are flexing
@ -853,6 +976,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
return
self.endPath()
self.rMoveTo((self.popall()[0], 0))
def op_vmoveto(self, index):
if self.flexing:
# We must add a parameter to the stack if we are flexing
@ -861,8 +985,10 @@ class T1OutlineExtractor(T2OutlineExtractor):
return
self.endPath()
self.rMoveTo((0, self.popall()[0]))
def op_closepath(self, index):
self.closePath()
def op_setcurrentpoint(self, index):
args = self.popall()
x, y = args
@ -876,6 +1002,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
self.width = wx
self.sbx = sbx
self.currentPoint = sbx, self.currentPoint[1]
def op_sbw(self, index):
self.popall() # XXX
@ -884,6 +1011,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
subrIndex = self.pop()
subr = self.subrs[subrIndex]
self.execute(subr)
def op_callothersubr(self, index):
subrIndex = self.pop()
nArgs = self.pop()
@ -894,6 +1022,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
elif subrIndex == 1 and nArgs == 0:
self.flexing = 1
# ignore...
def op_pop(self, index):
pass # ignore...
@ -941,20 +1070,25 @@ class T1OutlineExtractor(T2OutlineExtractor):
def op_dotsection(self, index):
self.popall() # XXX
def op_hstem3(self, index):
self.popall() # XXX
def op_seac(self, index):
"asb adx ady bchar achar seac"
from fontTools.encodings.StandardEncoding import StandardEncoding
asb, adx, ady, bchar, achar = self.popall()
baseGlyph = StandardEncoding[bchar]
self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
accentGlyph = StandardEncoding[achar]
adx = adx + self.sbx - asb # seac weirdness
self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
def op_vstem3(self, index):
self.popall() # XXX
class T2CharString(object):
operandEncoding = t2OperandEncoding
@ -973,11 +1107,11 @@ class T2CharString(object):
def getNumRegions(self, vsindex=None):
pd = self.private
assert(pd is not None)
assert pd is not None
if vsindex is not None:
self._cur_vsindex = vsindex
elif self._cur_vsindex is None:
self._cur_vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0
self._cur_vsindex = pd.vsindex if hasattr(pd, "vsindex") else 0
return pd.getNumRegions(self._cur_vsindex)
def __repr__(self):
@ -1001,9 +1135,15 @@ class T2CharString(object):
def draw(self, pen, blender=None):
subrs = getattr(self.private, "Subrs", [])
extractor = self.outlineExtractor(pen, subrs, self.globalSubrs,
self.private.nominalWidthX, self.private.defaultWidthX,
self.private, blender)
extractor = self.outlineExtractor(
pen,
subrs,
self.globalSubrs,
self.private.nominalWidthX,
self.private.defaultWidthX,
self.private,
blender,
)
extractor.execute(self)
self.width = extractor.width
@ -1040,7 +1180,7 @@ class T2CharString(object):
bytecode.extend(bytechr(b) for b in opcodes[token])
except KeyError:
raise CharStringCompileError("illegal operator: %s" % token)
if token in ('hintmask', 'cntrmask'):
if token in ("hintmask", "cntrmask"):
bytecode.append(program[i]) # hint mask
i = i + 1
elif isinstance(token, int):
@ -1067,8 +1207,7 @@ class T2CharString(object):
self.bytecode = bytecode
self.program = None
def getToken(self, index,
len=len, byteord=byteord, isinstance=isinstance):
def getToken(self, index, len=len, byteord=byteord, isinstance=isinstance):
if self.bytecode is not None:
if index >= len(self.bytecode):
return None, 0, 0
@ -1100,6 +1239,7 @@ class T2CharString(object):
def toXML(self, xmlWriter, ttFont=None):
from fontTools.misc.textTools import num2binary
if self.bytecode is not None:
xmlWriter.dumphex(self.bytecode)
else:
@ -1110,15 +1250,15 @@ class T2CharString(object):
if token is None:
break
if isOperator:
if token in ('hintmask', 'cntrmask'):
if token in ("hintmask", "cntrmask"):
hintMask, isOperator, index = self.getToken(index)
bits = []
for byte in hintMask:
bits.append(num2binary(byteord(byte), 8))
hintMask = strjoin(bits)
line = ' '.join(args + [token, hintMask])
line = " ".join(args + [token, hintMask])
else:
line = ' '.join(args + [token])
line = " ".join(args + [token])
xmlWriter.write(line)
xmlWriter.newline()
args = []
@ -1132,11 +1272,12 @@ class T2CharString(object):
# NOTE: only CFF2 charstrings/subrs can have numeric arguments on
# the stack after the last operator. Compiling this would fail if
# this is part of CFF 1.0 table.
line = ' '.join(args)
line = " ".join(args)
xmlWriter.write(line)
def fromXML(self, name, attrs, content):
from fontTools.misc.textTools import binary2num, readHex
if attrs.get("raw"):
self.setBytecode(readHex(content))
return
@ -1155,7 +1296,7 @@ class T2CharString(object):
token = strToFixedToFloat(token, precisionBits=16)
except ValueError:
program.append(token)
if token in ('hintmask', 'cntrmask'):
if token in ("hintmask", "cntrmask"):
mask = content[i]
maskBytes = b""
for j in range(0, len(mask), 8):
@ -1168,6 +1309,7 @@ class T2CharString(object):
program.append(token)
self.setProgram(program)
class T1CharString(T2CharString):
operandEncoding = t1OperandEncoding
@ -1201,6 +1343,7 @@ class T1CharString(T2CharString):
extractor.execute(self)
self.width = extractor.width
class DictDecompiler(object):
operandEncoding = cffDictOperandEncoding
@ -1226,6 +1369,7 @@ class DictDecompiler(object):
value, index = handler(self, b0, data, index)
if value is not None:
push(value)
def pop(self):
value = self.stack[-1]
del self.stack[-1]
@ -1270,8 +1414,10 @@ class DictDecompiler(object):
def arg_SID(self, name):
return self.strings[self.pop()]
def arg_array(self, name):
return self.popall()
def arg_blendList(self, name):
"""
There may be non-blend args at the top of the stack. We first calculate
@ -1284,13 +1430,15 @@ class DictDecompiler(object):
We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by
the delta values. We then convert the default values, the first item in each entry, to an absolute value.
"""
vsindex = self.dict.get('vsindex', 0)
numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops.
vsindex = self.dict.get("vsindex", 0)
numMasters = (
self.parent.getNumRegions(vsindex) + 1
) # only a PrivateDict has blended ops.
numBlends = self.pop()
args = self.popall()
numArgs = len(args)
        # The spec says that there should be no non-blended Blue Values.
assert(numArgs == numMasters * numBlends)
assert numArgs == numMasters * numBlends
value = [None] * numBlends
numDeltas = numMasters - 1
i = 0
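
To see the charstring machinery above in action, a hedged sketch of drawing a CFF glyph through a pen; the font path and glyph name are hypothetical.

from fontTools.ttLib import TTFont
from fontTools.pens.boundsPen import BoundsPen

font = TTFont("MyFont.otf")                      # hypothetical CFF-flavoured font
charString = font["CFF "].cff[0].CharStrings["A"]
pen = BoundsPen(font.getGlyphSet())
charString.draw(pen)                             # runs T2OutlineExtractor under the hood
print(charString.width, pen.bounds)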

View File

@ -24,7 +24,7 @@ import logging
log = logging.getLogger(__name__)
ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently
ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
@ -32,7 +32,7 @@ endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This is not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = br"""
stringPat = rb"""
\(
(
(
@ -51,13 +51,17 @@ stringRE = re.compile(stringPat)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception): pass
class PSError(Exception): pass
class PSTokenError(Exception):
pass
class PSError(Exception):
pass
class PSTokenizer(object):
def __init__(self, buf=b'', encoding="ascii"):
def __init__(self, buf=b"", encoding="ascii"):
# Force self.buf to be a byte string
buf = tobytes(buf)
self.buf = buf
@ -86,14 +90,16 @@ class PSTokenizer(object):
self.closed = True
del self.buf, self.pos
def getnexttoken(self,
def getnexttoken(
self,
# localize some stuff, for performance
len=len,
ps_special=ps_special,
stringmatch=stringRE.match,
hexstringmatch=hexstringRE.match,
commentmatch=commentRE.match,
endmatch=endofthingRE.match):
endmatch=endofthingRE.match,
):
self.skipwhite()
if self.pos >= self.len:
@ -102,38 +108,38 @@ class PSTokenizer(object):
buf = self.buf
char = bytechr(byteord(buf[pos]))
if char in ps_special:
if char in b'{}[]':
tokentype = 'do_special'
if char in b"{}[]":
tokentype = "do_special"
token = char
elif char == b'%':
tokentype = 'do_comment'
elif char == b"%":
tokentype = "do_comment"
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == b'(':
tokentype = 'do_string'
elif char == b"(":
tokentype = "do_string"
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError('bad string at character %d' % pos)
raise PSTokenError("bad string at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == b'<':
tokentype = 'do_hexstring'
elif char == b"<":
tokentype = "do_hexstring"
m = hexstringmatch(buf, pos)
if m is None:
raise PSTokenError('bad hexstring at character %d' % pos)
raise PSTokenError("bad hexstring at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
else:
raise PSTokenError('bad token at character %d' % pos)
raise PSTokenError("bad token at character %d" % pos)
else:
if char == b'/':
tokentype = 'do_literal'
if char == b"/":
tokentype = "do_literal"
m = endmatch(buf, pos + 1)
else:
tokentype = ''
tokentype = ""
m = endmatch(buf, pos)
if m is None:
raise PSTokenError('bad token at character %d' % pos)
raise PSTokenError("bad token at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
@ -152,14 +158,13 @@ class PSTokenizer(object):
self.pos = 4
def stopeexec(self):
if not hasattr(self, 'dirtybuf'):
if not hasattr(self, "dirtybuf"):
return
self.buf = self.dirtybuf
del self.dirtybuf
class PSInterpreter(PSOperators):
def __init__(self, encoding="ascii"):
systemdict = {}
userdict = {}
@ -172,18 +177,18 @@ class PSInterpreter(PSOperators):
def fillsystemdict(self):
systemdict = self.dictstack[0]
systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
systemdict[']'] = ps_operator(']', self.do_makearray)
systemdict['true'] = ps_boolean(1)
systemdict['false'] = ps_boolean(0)
systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
systemdict['FontDirectory'] = ps_dict({})
systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
systemdict["]"] = ps_operator("]", self.do_makearray)
systemdict["true"] = ps_boolean(1)
systemdict["false"] = ps_boolean(0)
systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
systemdict["FontDirectory"] = ps_dict({})
self.suckoperators(systemdict, self.__class__)
def suckoperators(self, systemdict, klass):
for name in dir(klass):
attr = getattr(self, name)
if isinstance(attr, Callable) and name[:3] == 'ps_':
if isinstance(attr, Callable) and name[:3] == "ps_":
name = name[3:]
systemdict[name] = ps_operator(name, attr)
for baseclass in klass.__bases__:
@ -211,24 +216,25 @@ class PSInterpreter(PSOperators):
except:
if self.tokenizer is not None:
log.debug(
'ps error:\n'
'- - - - - - -\n'
'%s\n'
'>>>\n'
'%s\n'
'- - - - - - -',
"ps error:\n"
"- - - - - - -\n"
"%s\n"
">>>\n"
"%s\n"
"- - - - - - -",
self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
)
raise
def handle_object(self, object):
if not (self.proclevel or object.literal or object.type == 'proceduretype'):
if object.type != 'operatortype':
if not (self.proclevel or object.literal or object.type == "proceduretype"):
if object.type != "operatortype":
object = self.resolve_name(object.value)
if object.literal:
self.push(object)
else:
if object.type == 'proceduretype':
if object.type == "proceduretype":
self.call_procedure(object)
else:
object.function()
@ -245,22 +251,25 @@ class PSInterpreter(PSOperators):
for i in range(len(dictstack) - 1, -1, -1):
if name in dictstack[i]:
return dictstack[i][name]
raise PSError('name error: ' + str(name))
raise PSError("name error: " + str(name))
def do_token(self, token,
def do_token(
self,
token,
int=int,
float=float,
ps_name=ps_name,
ps_integer=ps_integer,
ps_real=ps_real):
ps_real=ps_real,
):
try:
num = int(token)
except (ValueError, OverflowError):
try:
num = float(token)
except (ValueError, OverflowError):
if '#' in token:
hashpos = token.find('#')
if "#" in token:
hashpos = token.find("#")
try:
base = int(token[:hashpos])
num = int(token[hashpos + 1 :], base)
@ -287,7 +296,7 @@ class PSInterpreter(PSOperators):
def do_hexstring(self, token):
hexStr = "".join(token[1:-1].split())
if len(hexStr) % 2:
hexStr = hexStr + '0'
hexStr = hexStr + "0"
cleanstr = []
for i in range(0, len(hexStr), 2):
cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
@ -295,10 +304,10 @@ class PSInterpreter(PSOperators):
return ps_string(cleanstr)
def do_special(self, token):
if token == '{':
if token == "{":
self.proclevel = self.proclevel + 1
return self.procmark
elif token == '}':
elif token == "}":
proc = []
while 1:
topobject = self.pop()
@ -308,12 +317,12 @@ class PSInterpreter(PSOperators):
self.proclevel = self.proclevel - 1
proc.reverse()
return ps_procedure(proc)
elif token == '[':
elif token == "[":
return self.mark
elif token == ']':
return ps_name(']')
elif token == "]":
return ps_name("]")
else:
raise PSTokenError('huh?')
raise PSTokenError("huh?")
def push(self, object):
self.stack.append(object)
@ -321,11 +330,13 @@ class PSInterpreter(PSOperators):
def pop(self, *types):
stack = self.stack
if not stack:
raise PSError('stack underflow')
raise PSError("stack underflow")
object = stack[-1]
if types:
if object.type not in types:
raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
raise PSError(
"typecheck, expected %s, found %s" % (repr(types), object.type)
)
del stack[-1]
return object
@ -355,23 +366,26 @@ def unpack_item(item):
newitem = [None] * len(item.value)
for i in range(len(item.value)):
newitem[i] = unpack_item(item.value[i])
if item.type == 'proceduretype':
if item.type == "proceduretype":
newitem = tuple(newitem)
else:
newitem = item.value
return newitem
def suckfont(data, encoding="ascii"):
m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
if m:
fontName = m.group(1)
fontName = fontName.decode()
else:
fontName = None
interpreter = PSInterpreter(encoding=encoding)
interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
interpreter.interpret(
b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
)
interpreter.interpret(data)
fontdir = interpreter.dictstack[0]['FontDirectory'].value
fontdir = interpreter.dictstack[0]["FontDirectory"].value
if fontName in fontdir:
rawfont = fontdir[fontName]
else:

View File

@ -23,50 +23,60 @@ class ps_operator(ps_object):
self.name = name
self.function = function
self.type = self.__class__.__name__[3:] + "type"
def __repr__(self):
return "<operator %s>" % self.name
class ps_procedure(ps_object):
literal = 0
def __repr__(self):
return "<procedure>"
def __str__(self):
psstring = '{'
psstring = "{"
for i in range(len(self.value)):
if i:
psstring = psstring + ' ' + str(self.value[i])
psstring = psstring + " " + str(self.value[i])
else:
psstring = psstring + str(self.value[i])
return psstring + '}'
return psstring + "}"
class ps_name(ps_object):
literal = 0
def __str__(self):
if self.literal:
return '/' + self.value
return "/" + self.value
else:
return self.value
class ps_literal(ps_object):
def __str__(self):
return '/' + self.value
return "/" + self.value
class ps_array(ps_object):
def __str__(self):
psstring = '['
psstring = "["
for i in range(len(self.value)):
item = self.value[i]
access = _accessstrings[item.access]
if access:
access = ' ' + access
access = " " + access
if i:
psstring = psstring + ' ' + str(item) + access
psstring = psstring + " " + str(item) + access
else:
psstring = psstring + str(item) + access
return psstring + ']'
return psstring + "]"
def __repr__(self):
return "<array>"
_type1_pre_eexec_order = [
"FontInfo",
"FontName",
@ -77,7 +87,7 @@ _type1_pre_eexec_order = [
"FontBBox",
"UniqueID",
"Metrics",
"StrokeWidth"
"StrokeWidth",
]
_type1_fontinfo_order = [
@ -89,40 +99,43 @@ _type1_fontinfo_order = [
"ItalicAngle",
"isFixedPitch",
"UnderlinePosition",
"UnderlineThickness"
"UnderlineThickness",
]
_type1_post_eexec_order = [
"Private",
"CharStrings",
"FID"
]
_type1_post_eexec_order = ["Private", "CharStrings", "FID"]
def _type1_item_repr(key, value):
psstring = ""
access = _accessstrings[value.access]
if access:
access = access + ' '
if key == 'CharStrings':
psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
elif key == 'Encoding':
access = access + " "
if key == "CharStrings":
psstring = psstring + "/%s %s def\n" % (
key,
_type1_CharString_repr(value.value),
)
elif key == "Encoding":
psstring = psstring + _type1_Encoding_repr(value, access)
else:
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
return psstring
def _type1_Encoding_repr(encoding, access):
encoding = encoding.value
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
for i in range(256):
name = encoding[i].value
if name != '.notdef':
if name != ".notdef":
psstring = psstring + "dup %d /%s put\n" % (i, name)
return psstring + access + "def\n"
def _type1_CharString_repr(charstrings):
items = sorted(charstrings.items())
return 'xxx'
return "xxx"
class ps_font(ps_object):
def __str__(self):
@ -146,14 +159,22 @@ class ps_font(ps_object):
pass
else:
psstring = psstring + _type1_item_repr(key, value)
return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
return (
psstring
+ "dup/FontName get exch definefont pop\nmark currentfile closefile\n"
+ 8 * (64 * "0" + "\n")
+ "cleartomark"
+ "\n"
)
def __repr__(self):
return '<font>'
return "<font>"
class ps_file(ps_object):
pass
class ps_dict(ps_object):
def __str__(self):
psstring = "%d dict dup begin\n" % len(self.value)
@ -161,62 +182,69 @@ class ps_dict(ps_object):
for key, value in items:
access = _accessstrings[value.access]
if access:
access = access + ' '
access = access + " "
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
return psstring + 'end '
return psstring + "end "
def __repr__(self):
return "<dict>"
class ps_mark(ps_object):
def __init__(self):
self.value = 'mark'
self.value = "mark"
self.type = self.__class__.__name__[3:] + "type"
class ps_procmark(ps_object):
def __init__(self):
self.value = 'procmark'
self.value = "procmark"
self.type = self.__class__.__name__[3:] + "type"
class ps_null(ps_object):
def __init__(self):
self.type = self.__class__.__name__[3:] + "type"
class ps_boolean(ps_object):
def __str__(self):
if self.value:
return 'true'
return "true"
else:
return 'false'
return "false"
class ps_string(ps_object):
def __str__(self):
return "(%s)" % repr(self.value)[1:-1]
class ps_integer(ps_object):
def __str__(self):
return repr(self.value)
class ps_real(ps_object):
def __str__(self):
return repr(self.value)
class PSOperators(object):
def ps_def(self):
obj = self.pop()
name = self.pop()
self.dictstack[-1][name.value] = obj
def ps_bind(self):
proc = self.pop('proceduretype')
proc = self.pop("proceduretype")
self.proc_bind(proc)
self.push(proc)
def proc_bind(self, proc):
for i in range(len(proc.value)):
item = proc.value[i]
if item.type == 'proceduretype':
if item.type == "proceduretype":
self.proc_bind(item)
else:
if not item.literal:
@ -225,12 +253,12 @@ class PSOperators(object):
except:
pass
else:
if obj.type == 'operatortype':
if obj.type == "operatortype":
proc.value[i] = obj
def ps_exch(self):
if len(self.stack) < 2:
raise RuntimeError('stack underflow')
raise RuntimeError("stack underflow")
obj1 = self.pop()
obj2 = self.pop()
self.push(obj1)
@ -238,12 +266,12 @@ class PSOperators(object):
def ps_dup(self):
if not self.stack:
raise RuntimeError('stack underflow')
raise RuntimeError("stack underflow")
self.push(self.stack[-1])
def ps_exec(self):
obj = self.pop()
if obj.type == 'proceduretype':
if obj.type == "proceduretype":
self.call_procedure(obj)
else:
self.handle_object(obj)
@ -267,12 +295,19 @@ class PSOperators(object):
self.push(obj)
def ps_matrix(self):
matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
matrix = [
ps_real(1.0),
ps_integer(0),
ps_integer(0),
ps_real(1.0),
ps_integer(0),
ps_integer(0),
]
self.push(ps_array(matrix))
def ps_string(self):
num = self.pop('integertype').value
self.push(ps_string('\0' * num))
num = self.pop("integertype").value
self.push(ps_string("\0" * num))
def ps_type(self):
obj = self.pop()
@ -306,11 +341,11 @@ class PSOperators(object):
self.push(ps_file(self.tokenizer))
def ps_eexec(self):
f = self.pop('filetype').value
f = self.pop("filetype").value
f.starteexec()
def ps_closefile(self):
f = self.pop('filetype').value
f = self.pop("filetype").value
f.skipwhite()
f.stopeexec()
@ -319,12 +354,10 @@ class PSOperators(object):
while obj != self.mark:
obj = self.pop()
def ps_readstring(self,
ps_boolean=ps_boolean,
len=len):
s = self.pop('stringtype')
def ps_readstring(self, ps_boolean=ps_boolean, len=len):
s = self.pop("stringtype")
oldstr = s.value
f = self.pop('filetype')
f = self.pop("filetype")
# pad = file.value.read(1)
# for StringIO, this is faster
f.value.pos = f.value.pos + 1
@ -335,18 +368,18 @@ class PSOperators(object):
def ps_known(self):
key = self.pop()
d = self.pop('dicttype', 'fonttype')
d = self.pop("dicttype", "fonttype")
self.push(ps_boolean(key.value in d.value))
def ps_if(self):
proc = self.pop('proceduretype')
if self.pop('booleantype').value:
proc = self.pop("proceduretype")
if self.pop("booleantype").value:
self.call_procedure(proc)
def ps_ifelse(self):
proc2 = self.pop('proceduretype')
proc1 = self.pop('proceduretype')
if self.pop('booleantype').value:
proc2 = self.pop("proceduretype")
proc1 = self.pop("proceduretype")
if self.pop("booleantype").value:
self.call_procedure(proc1)
else:
self.call_procedure(proc2)
@ -370,19 +403,19 @@ class PSOperators(object):
self.push(obj)
def ps_not(self):
obj = self.pop('booleantype', 'integertype')
if obj.type == 'booleantype':
obj = self.pop("booleantype", "integertype")
if obj.type == "booleantype":
self.push(ps_boolean(not obj.value))
else:
self.push(ps_integer(~obj.value))
def ps_print(self):
str = self.pop('stringtype')
print('PS output --->', str.value)
str = self.pop("stringtype")
print("PS output --->", str.value)
def ps_anchorsearch(self):
seek = self.pop('stringtype')
s = self.pop('stringtype')
seek = self.pop("stringtype")
s = self.pop("stringtype")
seeklen = len(seek.value)
if s.value[:seeklen] == seek.value:
self.push(ps_string(s.value[seeklen:]))
@ -393,12 +426,12 @@ class PSOperators(object):
self.push(ps_boolean(0))
def ps_array(self):
num = self.pop('integertype')
num = self.pop("integertype")
array = ps_array([None] * num.value)
self.push(array)
def ps_astore(self):
array = self.pop('arraytype')
array = self.pop("arraytype")
for i in range(len(array.value) - 1, -1, -1):
array.value[i] = self.pop()
self.push(array)
@ -410,13 +443,13 @@ class PSOperators(object):
def ps_put(self):
obj1 = self.pop()
obj2 = self.pop()
obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
obj3 = self.pop("arraytype", "dicttype", "stringtype", "proceduretype")
tp = obj3.type
if tp == 'arraytype' or tp == 'proceduretype':
if tp == "arraytype" or tp == "proceduretype":
obj3.value[obj2.value] = obj1
elif tp == 'dicttype':
elif tp == "dicttype":
obj3.value[obj2.value] = obj1
elif tp == 'stringtype':
elif tp == "stringtype":
index = obj2.value
obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :]
@ -424,54 +457,56 @@ class PSOperators(object):
obj1 = self.pop()
if obj1.value == "Encoding":
pass
obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
obj2 = self.pop(
"arraytype", "dicttype", "stringtype", "proceduretype", "fonttype"
)
tp = obj2.type
if tp in ('arraytype', 'proceduretype'):
if tp in ("arraytype", "proceduretype"):
self.push(obj2.value[obj1.value])
elif tp in ('dicttype', 'fonttype'):
elif tp in ("dicttype", "fonttype"):
self.push(obj2.value[obj1.value])
elif tp == 'stringtype':
elif tp == "stringtype":
self.push(ps_integer(ord(obj2.value[obj1.value])))
else:
assert False, "shouldn't get here"
def ps_getinterval(self):
obj1 = self.pop('integertype')
obj2 = self.pop('integertype')
obj3 = self.pop('arraytype', 'stringtype')
obj1 = self.pop("integertype")
obj2 = self.pop("integertype")
obj3 = self.pop("arraytype", "stringtype")
tp = obj3.type
if tp == 'arraytype':
if tp == "arraytype":
self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value]))
elif tp == 'stringtype':
elif tp == "stringtype":
self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value]))
def ps_putinterval(self):
obj1 = self.pop('arraytype', 'stringtype')
obj2 = self.pop('integertype')
obj3 = self.pop('arraytype', 'stringtype')
obj1 = self.pop("arraytype", "stringtype")
obj2 = self.pop("integertype")
obj3 = self.pop("arraytype", "stringtype")
tp = obj3.type
if tp == 'arraytype':
if tp == "arraytype":
obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value
elif tp == 'stringtype':
elif tp == "stringtype":
newstr = obj3.value[: obj2.value]
newstr = newstr + obj1.value
newstr = newstr + obj3.value[obj2.value + len(obj1.value) :]
obj3.value = newstr
def ps_cvn(self):
self.push(ps_name(self.pop('stringtype').value))
self.push(ps_name(self.pop("stringtype").value))
def ps_index(self):
n = self.pop('integertype').value
n = self.pop("integertype").value
if n < 0:
raise RuntimeError('index may not be negative')
raise RuntimeError("index may not be negative")
self.push(self.stack[-1 - n])
def ps_for(self):
proc = self.pop('proceduretype')
limit = self.pop('integertype', 'realtype').value
increment = self.pop('integertype', 'realtype').value
i = self.pop('integertype', 'realtype').value
proc = self.pop("proceduretype")
limit = self.pop("integertype", "realtype").value
increment = self.pop("integertype", "realtype").value
i = self.pop("integertype", "realtype").value
while 1:
if increment > 0:
if i > limit:
@ -487,51 +522,53 @@ class PSOperators(object):
i = i + increment
def ps_forall(self):
proc = self.pop('proceduretype')
obj = self.pop('arraytype', 'stringtype', 'dicttype')
proc = self.pop("proceduretype")
obj = self.pop("arraytype", "stringtype", "dicttype")
tp = obj.type
if tp == 'arraytype':
if tp == "arraytype":
for item in obj.value:
self.push(item)
self.call_procedure(proc)
elif tp == 'stringtype':
elif tp == "stringtype":
for item in obj.value:
self.push(ps_integer(ord(item)))
self.call_procedure(proc)
elif tp == 'dicttype':
elif tp == "dicttype":
for key, value in obj.value.items():
self.push(ps_name(key))
self.push(value)
self.call_procedure(proc)
def ps_definefont(self):
font = self.pop('dicttype')
font = self.pop("dicttype")
name = self.pop()
font = ps_font(font.value)
self.dictstack[0]['FontDirectory'].value[name.value] = font
self.dictstack[0]["FontDirectory"].value[name.value] = font
self.push(font)
def ps_findfont(self):
name = self.pop()
font = self.dictstack[0]['FontDirectory'].value[name.value]
font = self.dictstack[0]["FontDirectory"].value[name.value]
self.push(font)
def ps_pop(self):
self.pop()
def ps_dict(self):
self.pop('integertype')
self.pop("integertype")
self.push(ps_dict({}))
def ps_begin(self):
self.dictstack.append(self.pop('dicttype').value)
self.dictstack.append(self.pop("dicttype").value)
def ps_end(self):
if len(self.dictstack) > 2:
del self.dictstack[-1]
else:
raise RuntimeError('dictstack underflow')
raise RuntimeError("dictstack underflow")
notdef = '.notdef'
notdef = ".notdef"
from fontTools.encodings.StandardEncoding import StandardEncoding
ps_StandardEncoding = list(map(ps_name, StandardEncoding))

View File

@ -15,9 +15,11 @@ __all__ = [
"roundFunc",
]
def noRound(value):
return value
def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``.
@ -41,10 +43,12 @@ def otRound(value):
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
def maybeRound(v, tolerance, round=otRound):
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
raise ValueError("Rounding tolerance must be positive")
@ -52,7 +56,7 @@ def roundFunc(tolerance, round=otRound):
if tolerance == 0:
return noRound
if tolerance >= .5:
if tolerance >= 0.5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
@ -85,7 +89,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
return "0.0"
value = otRound(value / factor) * factor
eps = .5 * factor
eps = 0.5 * factor
lo = value - eps
hi = value + eps
# If the range of valid choices spans an integer, return the integer.
@ -99,7 +103,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
for i in range(len(lo)):
if lo[i] != hi[i]:
break
period = lo.find('.')
period = lo.find(".")
assert period < i
fmt = "%%.%df" % (i - period)
return fmt % value
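
For orientation, a minimal usage sketch of the rounding helpers reformatted above; the expected values in the comments follow from the definitions in this file, and the 1/0x4000 factor is an arbitrary illustrative choice.

from fontTools.misc.roundTools import otRound, roundFunc, nearestMultipleShortestRepr

assert otRound(0.5) == 1                  # rounds towards +Infinity
assert otRound(-0.5) == 0
round_ = roundFunc(0.1)                   # only rounds within the tolerance
assert round_(2.95) == 3                  # 0.05 away from 3: rounded
assert round_(2.5) == 2.5                 # 0.5 away from 3: left untouched
print(nearestMultipleShortestRepr(0.20000001, 1 / 0x4000))   # "0.2"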

View File

@ -58,6 +58,7 @@ __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception):
pass
def pack(fmt, obj):
formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
elements = []
@ -74,6 +75,7 @@ def pack(fmt, obj):
data = struct.pack(*(formatstring,) + tuple(elements))
return data
def unpack(fmt, data, obj=None):
if obj is None:
obj = {}
@ -98,10 +100,12 @@ def unpack(fmt, data, obj=None):
d[name] = value
return obj
def unpack2(fmt, data, obj=None):
length = calcsize(fmt)
return unpack(fmt, data[:length], obj), data[length:]
def calcsize(fmt):
formatstring, names, fixes = getformat(fmt)
return struct.calcsize(formatstring)
@ -125,13 +129,11 @@ _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")
_fixedpointmappings = {
8: "b",
16: "h",
32: "l"}
_fixedpointmappings = {8: "b", 16: "h", 32: "l"}
_formatcache = {}
def getformat(fmt, keep_pad_byte=False):
fmt = tostr(fmt, encoding="ascii")
try:
@ -147,7 +149,7 @@ def getformat(fmt, keep_pad_byte=False):
m = _extraRE.match(line)
if m:
formatchar = m.group(1)
if formatchar != 'x' and formatstring:
if formatchar != "x" and formatstring:
raise Error("a special fmt char must be first")
else:
m = _elementRE.match(line)
@ -171,6 +173,7 @@ def getformat(fmt, keep_pad_byte=False):
_formatcache[fmt] = formatstring, names, fixes
return formatstring, names, fixes
def _test():
fmt = """
# comments are allowed
@ -188,16 +191,16 @@ def _test():
apad: x
"""
print('size:', calcsize(fmt))
print("size:", calcsize(fmt))
class foo(object):
pass
i = foo()
i.ashort = 0x7fff
i.along = 0x7fffffff
i.abyte = 0x7f
i.ashort = 0x7FFF
i.along = 0x7FFFFFFF
i.abyte = 0x7F
i.achar = "a"
i.astr = "12345"
i.afloat = 0.5
@ -206,11 +209,12 @@ def _test():
i.abool = True
data = pack(fmt, i)
print('data:', repr(data))
print("data:", repr(data))
print(unpack(fmt, data))
i2 = foo()
unpack(fmt, data, i2)
print(vars(i2))
if __name__ == "__main__":
_test()

View File

@ -6,13 +6,13 @@ import sys
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
t, x, y = sp.symbols('t x y', real=True)
c = sp.symbols('c', real=False) # Complex representation instead of x/y
t, x, y = sp.symbols("t x y", real=True)
c = sp.symbols("c", real=False) # Complex representation instead of x/y
X = tuple(sp.symbols('x:%d'%(n+1), real=True))
Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
C = tuple(sp.symbols('c:%d'%(n+1), real=False))
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
# Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)]
@ -25,15 +25,20 @@ del last, this
BernsteinPolynomial = tuple(
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
for n,coeffs in enumerate(BinomialCoefficient))
for n, coeffs in enumerate(BinomialCoefficient)
)
BezierCurve = tuple(
tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
for j in range(2))
for n,bernsteins in enumerate(BernsteinPolynomial))
tuple(
sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
for j in range(2)
)
for n, bernsteins in enumerate(BernsteinPolynomial)
)
BezierCurveC = tuple(
sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
for n,bernsteins in enumerate(BernsteinPolynomial))
for n, bernsteins in enumerate(BernsteinPolynomial)
)
def green(f, curveXY):
@ -44,17 +49,17 @@ def green(f, curveXY):
class _BezierFuncsLazy(dict):
def __init__(self, symfunc):
self._symfunc = symfunc
self._bezfuncs = {}
def __missing__(self, i):
args = ['p%d'%d for d in range(i+1)]
args = ["p%d" % d for d in range(i + 1)]
f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
return sp.lambdify(args, f)
class GreenPen(BasePen):
_BezierFuncs = {}
@ -97,6 +102,7 @@ class GreenPen(BasePen):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens.
# Do not use this in real code.
# Use fontTools.pens.momentsPen.MomentsPen instead.
@ -114,7 +120,7 @@ def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
print('"""%s"""' % docstring)
print(
'''from fontTools.pens.basePen import BasePen, OpenContourError
"""from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
except ImportError:
@ -135,10 +141,14 @@ class %s(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
'''% (penName, penName), file=file)
"""
% (penName, penName),
file=file,
)
for name, f in funcs:
print(' self.%s = 0' % name, file=file)
print('''
print(" self.%s = 0" % name, file=file)
print(
"""
def _moveTo(self, p0):
self.__startPoint = p0
@ -154,32 +164,40 @@ class %s(BasePen):
raise OpenContourError(
"Green theorem is not defined on open contours."
)
''', end='', file=file)
""",
end="",
file=file,
)
for n in (1, 2, 3):
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
greens = [green(f, BezierCurve[n]) for name, f in funcs]
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
defs, exprs = sp.cse(greens,
optimizations='basic',
symbols=(sp.Symbol('r%d'%i) for i in count()))
defs, exprs = sp.cse(
greens,
optimizations="basic",
symbols=(sp.Symbol("r%d" % i) for i in count()),
)
print()
for name, value in defs:
print(' @cython.locals(%s=cython.double)' % name, file=file)
print(" @cython.locals(%s=cython.double)" % name, file=file)
if n == 1:
print('''\
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
''', file=file)
""",
file=file,
)
elif n == 2:
print('''\
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@ -187,9 +205,12 @@ class %s(BasePen):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
''', file=file)
""",
file=file,
)
elif n == 3:
print('''\
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@ -199,24 +220,30 @@ class %s(BasePen):
x1,y1 = p1
x2,y2 = p2
x3,y3 = p3
''', file=file)
""",
file=file,
)
for name, value in defs:
print(' %s = %s' % (name, value), file=file)
print(" %s = %s" % (name, value), file=file)
print(file=file)
for name, value in zip([f[0] for f in funcs], exprs):
print(' self.%s += %s' % (name, value), file=file)
print(" self.%s += %s" % (name, value), file=file)
print('''
print(
"""
if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('%s', ['''%penName, file=file)
printGreenPen('%s', ["""
% penName,
file=file,
)
for name, f in funcs:
print(" ('%s', %s)," % (name, str(f)), file=file)
print(' ])', file=file)
print(" ])", file=file)
if __name__ == '__main__':
if __name__ == "__main__":
pen = AreaPen()
pen.moveTo((100, 100))
pen.lineTo((100, 200))

View File

@ -29,12 +29,14 @@ def parseXML(xmlSnippet):
if isinstance(xmlSnippet, bytes):
xml += xmlSnippet
elif isinstance(xmlSnippet, str):
xml += tobytes(xmlSnippet, 'utf-8')
xml += tobytes(xmlSnippet, "utf-8")
elif isinstance(xmlSnippet, Iterable):
xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
else:
raise TypeError("expected string or sequence of strings; found %r"
% type(xmlSnippet).__name__)
raise TypeError(
"expected string or sequence of strings; found %r"
% type(xmlSnippet).__name__
)
xml += b"</root>"
reader.parser.Parse(xml, 0)
return reader.root[2]
@ -76,6 +78,7 @@ class FakeFont:
return self.glyphOrder_[glyphID]
else:
return "glyph%.5d" % glyphID
def getGlyphNameMany(self, lst):
return [self.getGlyphName(gid) for gid in lst]
@ -92,6 +95,7 @@ class FakeFont:
class TestXMLReader_(object):
def __init__(self):
from xml.parsers.expat import ParserCreate
self.parser = ParserCreate()
self.parser.StartElementHandler = self.startElement_
self.parser.EndElementHandler = self.endElement_
@ -114,7 +118,7 @@ class TestXMLReader_(object):
self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr='\n'):
def makeXMLWriter(newlinestr="\n"):
# don't write OS-specific new lines
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
# erase XML declaration
@ -166,7 +170,7 @@ class MockFont(object):
to its glyphOrder."""
def __init__(self):
self._glyphOrder = ['.notdef']
self._glyphOrder = [".notdef"]
class AllocatingDict(dict):
def __missing__(reverseDict, key):
@ -174,7 +178,8 @@ class MockFont(object):
gid = len(reverseDict)
reverseDict[key] = gid
return gid
self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False
def getGlyphID(self, glyph):
@ -192,7 +197,6 @@ class MockFont(object):
class TestCase(_TestCase):
def __init__(self, methodName):
_TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@ -202,7 +206,6 @@ class TestCase(_TestCase):
class DataFilesHandler(TestCase):
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0

View File

@ -51,7 +51,7 @@ def deHexStr(hexdata):
def hexStr(data):
"""Convert binary data to a hex string."""
h = string.hexdigits
r = ''
r = ""
for c in data:
i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
@ -74,7 +74,7 @@ def num2binary(l, bits=32):
items.append(binary)
items.reverse()
assert l in (0, -1), "number doesn't fit in number of bits"
return ' '.join(items)
return " ".join(items)
def binary2num(bin):
@ -151,4 +151,5 @@ def bytesjoin(iterable, joiner=b""):
if __name__ == "__main__":
import doctest, sys
sys.exit(doctest.testmod().failed)
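
A short round trip through the helpers above (a sketch; the module path fontTools.misc.textTools is assumed from the rest of the library):

from fontTools.misc.textTools import deHexStr, hexStr, num2binary, binary2num

data = deHexStr("cafe f00d")              # whitespace in the hex input is ignored
assert hexStr(data) == "cafef00d"
assert binary2num(num2binary(42, bits=8)) == 42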

View File

@ -10,8 +10,21 @@ import calendar
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHNAMES = [
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
def asctime(t=None):
@ -35,22 +48,27 @@ def asctime(t=None):
if t is None:
t = time.localtime()
s = "%s %s %2s %s" % (
DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
time.strftime("%H:%M:%S %Y", t))
DAYNAMES[t.tm_wday],
MONTHNAMES[t.tm_mon],
t.tm_mday,
time.strftime("%H:%M:%S %Y", t),
)
return s
def timestampToString(value):
return asctime(time.gmtime(max(0, value + epoch_diff)))
def timestampFromString(value):
wkday, mnth = value[:7].split()
t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
wkday_idx = DAYNAMES.index(wkday)
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
return int(t.timestamp()) - epoch_diff
def timestampNow():
# https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
@ -58,6 +76,7 @@ def timestampNow():
return int(source_date_epoch) - epoch_diff
return int(time.time() - epoch_diff)
def timestampSinceEpoch(value):
return int(value - epoch_diff)
@ -65,4 +84,5 @@ def timestampSinceEpoch(value):
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
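
As a sketch of the conversions above: OpenType timestamps count seconds from 1904-01-01, so the value used below (3029529600) is simply 2000-01-01 expressed in that epoch.

from fontTools.misc.timeTools import timestampToString, timestampFromString

s = timestampToString(3029529600)
assert s == "Sat Jan  1 00:00:00 2000"
assert timestampFromString(s) == 3029529600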

View File

@ -248,6 +248,7 @@ class Transform(NamedTuple):
>>>
"""
import math
c = _normSinCos(math.cos(angle))
s = _normSinCos(math.sin(angle))
return self.transform((c, s, -s, c, 0, 0))
@ -263,6 +264,7 @@ class Transform(NamedTuple):
>>>
"""
import math
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
def transform(self, other):
@ -283,7 +285,8 @@ class Transform(NamedTuple):
yx1 * xx2 + yy1 * yx2,
yx1 * xy2 + yy1 * yy2,
xx2 * dx1 + yx2 * dy1 + dx2,
xy2*dx1 + yy2*dy1 + dy2)
xy2 * dx1 + yy2 * dy1 + dy2,
)
def reverseTransform(self, other):
"""Return a new transformation, which is the other transformation
@ -306,7 +309,8 @@ class Transform(NamedTuple):
yx1 * xx2 + yy1 * yx2,
yx1 * xy2 + yy1 * yy2,
xx2 * dx1 + yx2 * dy1 + dx2,
xy2*dx1 + yy2*dy1 + dy2)
xy2 * dx1 + yy2 * dy1 + dy2,
)
def inverse(self):
"""Return the inverse transformation.
@ -368,6 +372,7 @@ class Transform(NamedTuple):
Identity = Transform()
def Offset(x=0, y=0):
"""Return the identity transformation offset by x, y.
@ -378,6 +383,7 @@ def Offset(x=0, y=0):
"""
return Transform(1, 0, 0, 1, x, y)
def Scale(x, y=None):
"""Return the identity transformation scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
@ -395,4 +401,5 @@ def Scale(x, y=None):
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
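
A quick illustration of how the reformatted Transform composes (the concrete numbers are only an example):

from fontTools.misc.transform import Transform, Offset, Identity

t = Identity.translate(10, 20).scale(2)
assert t == Transform(2, 0, 0, 2, 10, 20)
assert t.transformPoint((3, 4)) == (16, 28)
assert Offset(10, 20) == Transform(1, 0, 0, 1, 10, 20)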

View File

@ -8,15 +8,19 @@ import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception): pass
class TTXParseError(Exception):
pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
if fileOrPath == '-':
def __init__(
self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
):
if fileOrPath == "-":
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
@ -29,6 +33,7 @@ class XMLReader(object):
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
@ -55,6 +60,7 @@ class XMLReader(object):
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
@ -83,7 +89,7 @@ class XMLReader(object):
self.stackSize = stackSize + 1
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
if hasattr(self.file, "name"):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
@ -113,13 +119,13 @@ class XMLReader(object):
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
if tag == "loca" and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
@ -157,7 +163,6 @@ class XMLReader(object):
class ProgressPrinter(object):
def __init__(self, title, maxval=100):
print(title)

View File

@ -9,12 +9,17 @@ INDENT = " "
class XMLWriter(object):
def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
newlinestr="\n"):
if encoding.lower().replace('-','').replace('_','') != 'utf8':
raise Exception('Only UTF-8 encoding is supported.')
if fileOrPath == '-':
def __init__(
self,
fileOrPath,
indentwhite=INDENT,
idlefunc=None,
encoding="utf_8",
newlinestr="\n",
):
if encoding.lower().replace("-", "").replace("_", "") != "utf8":
raise Exception("Only UTF-8 encoding is supported.")
if fileOrPath == "-":
fileOrPath = sys.stdout
if not hasattr(fileOrPath, "write"):
self.filename = fileOrPath
@ -30,11 +35,11 @@ class XMLWriter(object):
try:
# The bytes check should be first. See:
# https://github.com/fonttools/fonttools/pull/233
self.file.write(b'')
self.file.write(b"")
self.totype = tobytes
except TypeError:
# This better not fail.
self.file.write('')
self.file.write("")
self.totype = tostr
self.indentwhite = self.totype(indentwhite)
if newlinestr is None:
@ -84,7 +89,7 @@ class XMLWriter(object):
self.file.write(self.indentlevel * self.indentwhite)
self.needindent = 0
s = self.totype(data, encoding="utf_8")
if (strip):
if strip:
s = s.strip()
self.file.write(s)
@ -163,31 +168,36 @@ class XMLWriter(object):
def escape(data):
data = tostr(data, 'utf_8')
data = tostr(data, "utf_8")
data = data.replace("&", "&amp;")
data = data.replace("<", "&lt;")
data = data.replace(">", "&gt;")
data = data.replace("\r", "&#13;")
return data
def escapeattr(data):
data = escape(data)
data = data.replace('"', "&quot;")
return data
def escape8bit(data):
"""Input is Unicode string."""
def escapechar(c):
n = ord(c)
if 32 <= n <= 127 and c not in "<&>":
return c
else:
return "&#" + repr(n) + ";"
return strjoin(map(escapechar, data.decode('latin-1')))
return strjoin(map(escapechar, data.decode("latin-1")))
def hexStr(s):
h = string.hexdigits
r = ''
r = ""
for c in s:
i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
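
For reference, a self-contained sketch of the writer above, sent to an in-memory buffer instead of a .ttx file:

from io import BytesIO
from fontTools.misc.xmlWriter import XMLWriter

buf = BytesIO()
writer = XMLWriter(buf)
writer.begintag("greeting", attr="value")
writer.newline()
writer.write("Hello & goodbye")           # '&' comes out escaped as &amp;
writer.newline()
writer.endtag("greeting")
writer.newline()
print(buf.getvalue().decode("utf-8"))     # XML declaration plus the element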

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
import sys
from fontTools.mtiLib import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -2,5 +2,5 @@ import sys
from fontTools.otlLib.optimize import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -7,7 +7,6 @@ __all__ = ["AreaPen"]
class AreaPen(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
self.value = 0
@ -18,7 +17,7 @@ class AreaPen(BasePen):
def _lineTo(self, p1):
x0, y0 = self._p0
x1, y1 = p1
self.value -= (x1 - x0) * (y1 + y0) * .5
self.value -= (x1 - x0) * (y1 + y0) * 0.5
self._p0 = p1
def _qCurveToOne(self, p1, p2):
@ -38,11 +37,7 @@ class AreaPen(BasePen):
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
x3, y3 = p3[0] - x0, p3[1] - y0
self.value -= (
x1 * ( - y2 - y3) +
x2 * (y1 - 2*y3) +
x3 * (y1 + 2*y2 )
) * 0.15
self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
self._lineTo(p3)
self._p0 = p3
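
The line formula above is the trapezoid form of the shoelace rule; as a sketch, a counter-clockwise unit square should report a signed area of 1.0 (clockwise would give -1.0):

from fontTools.pens.areaPen import AreaPen

pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((1, 0))
pen.lineTo((1, 1))
pen.lineTo((0, 1))
pen.closePath()
print(pen.value)    # 1.0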

View File

@ -40,19 +40,25 @@ from typing import Tuple
from fontTools.misc.loggingTools import LogMixin
__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
"decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
__all__ = [
"AbstractPen",
"NullPen",
"BasePen",
"PenError",
"decomposeSuperBezierSegment",
"decomposeQuadraticSegment",
]
class PenError(Exception):
"""Represents an error during penning."""
class OpenContourError(PenError):
pass
class AbstractPen:
def moveTo(self, pt: Tuple[float, float]) -> None:
"""Begin a new sub path, set the current point to 'pt'. You must
end each sub path with a call to pen.closePath() or pen.endPath().
@ -116,7 +122,7 @@ class AbstractPen:
def addComponent(
self,
glyphName: str,
transformation: Tuple[float, float, float, float, float, float]
transformation: Tuple[float, float, float, float, float, float],
) -> None:
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
containing an affine transformation, or a Transform object from the
@ -128,8 +134,7 @@ class AbstractPen:
class NullPen(AbstractPen):
"""A pen that does nothing.
"""
"""A pen that does nothing."""
def moveTo(self, pt):
pass
@ -154,8 +159,8 @@ class NullPen(AbstractPen):
class LoggingPen(LogMixin, AbstractPen):
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)
"""
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""
pass
@ -187,16 +192,15 @@ class DecomposingPen(LoggingPen):
self.glyphSet = glyphSet
def addComponent(self, glyphName, transformation):
""" Transform the points of the base glyph and draw it onto self.
"""
"""Transform the points of the base glyph and draw it onto self."""
from fontTools.pens.transformPen import TransformPen
try:
glyph = self.glyphSet[glyphName]
except KeyError:
if not self.skipMissingComponents:
raise MissingComponentError(glyphName)
self.log.warning(
"glyph '%s' is missing from glyphSet; skipped" % glyphName)
self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
else:
tPen = TransformPen(self, transformation)
glyph.draw(tPen)
@ -350,13 +354,14 @@ def decomposeSuperBezierSegment(points):
factor = j / nDivisions
temp1 = points[i - 1]
temp2 = points[i - 2]
temp = (temp2[0] + factor * (temp1[0] - temp2[0]),
temp2[1] + factor * (temp1[1] - temp2[1]))
temp = (
temp2[0] + factor * (temp1[0] - temp2[0]),
temp2[1] + factor * (temp1[1] - temp2[1]),
)
if pt2 is None:
pt2 = temp
else:
pt3 = (0.5 * (pt2[0] + temp[0]),
0.5 * (pt2[1] + temp[1]))
pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1]))
bezierSegments.append((pt1, pt2, pt3))
pt1, pt2, pt3 = temp, None, None
bezierSegments.append((pt1, points[-2], points[-1]))
@ -387,13 +392,19 @@ def decomposeQuadraticSegment(points):
class _TestPen(BasePen):
"""Test class that prints PostScript to stdout."""
def _moveTo(self, pt):
print("%s %s moveto" % (pt[0], pt[1]))
def _lineTo(self, pt):
print("%s %s lineto" % (pt[0], pt[1]))
def _curveToOne(self, bcp1, bcp2, pt):
print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1],
bcp2[0], bcp2[1], pt[0], pt[1]))
print(
"%s %s %s %s %s %s curveto"
% (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
)
def _closePath(self):
print("closepath")

View File

@ -84,8 +84,9 @@ class BoundsPen(ControlBoundsPen):
bounds = self.bounds
bounds = updateBounds(bounds, pt)
if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
bounds = unionRect(bounds, calcCubicBounds(
self._getCurrentPoint(), bcp1, bcp2, pt))
bounds = unionRect(
bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
)
self.bounds = bounds
def _qCurveToOne(self, bcp, pt):
@ -93,6 +94,7 @@ class BoundsPen(ControlBoundsPen):
bounds = self.bounds
bounds = updateBounds(bounds, pt)
if not pointInRect(bcp, bounds):
bounds = unionRect(bounds, calcQuadraticBounds(
self._getCurrentPoint(), bcp, pt))
bounds = unionRect(
bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
)
self.bounds = bounds
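
The difference between the two pens in this file shows up as soon as an off-curve point sticks out of the control box; a sketch with arbitrarily chosen coordinates:

from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen

for cls in (ControlBoundsPen, BoundsPen):
    pen = cls(None)
    pen.moveTo((0, 0))
    pen.curveTo((50, 100), (100, 100), (150, 0))
    pen.closePath()
    print(cls.__name__, pen.bounds)
# ControlBoundsPen reports ymax 100 (the off-curve points);
# BoundsPen reports the true extremum, ymax 75.0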

View File

@ -5,11 +5,11 @@ __all__ = ["CocoaPen"]
class CocoaPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from AppKit import NSBezierPath
path = NSBezierPath.bezierPath()
self.path = path

View File

@ -40,8 +40,14 @@ class Cu2QuPen(AbstractPen):
but are handled separately as anchors.
"""
def __init__(self, other_pen, max_err, reverse_direction=False,
stats=None, ignore_single_points=False):
def __init__(
self,
other_pen,
max_err,
reverse_direction=False,
stats=None,
ignore_single_points=False,
):
if reverse_direction:
self.pen = ReverseContourPen(other_pen)
else:
@ -50,9 +56,13 @@ class Cu2QuPen(AbstractPen):
self.stats = stats
if ignore_single_points:
import warnings
warnings.warn("ignore_single_points is deprecated and "
warnings.warn(
"ignore_single_points is deprecated and "
"will be removed in future versions",
UserWarning, stacklevel=2)
UserWarning,
stacklevel=2,
)
self.ignore_single_points = ignore_single_points
self.start_pt = None
self.current_pt = None
@ -149,8 +159,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
stats: a dictionary counting the point numbers of quadratic segments.
"""
def __init__(self, other_point_pen, max_err, reverse_direction=False,
stats=None):
def __init__(self, other_point_pen, max_err, reverse_direction=False, stats=None):
BasePointToSegmentPen.__init__(self)
if reverse_direction:
self.pen = ReverseContourPointPen(other_point_pen)
@ -166,7 +175,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
prev_points = segments[-1][1]
prev_on_curve = prev_points[-1][0]
for segment_type, points in segments:
if segment_type == 'curve':
if segment_type == "curve":
for sub_points in self._split_super_bezier_segments(points):
on_curve, smooth, name, kwargs = sub_points[-1]
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
@ -200,8 +209,9 @@ class Cu2QuPointPen(BasePointToSegmentPen):
# a "super" bezier; decompose it
on_curve, smooth, name, kwargs = points[-1]
num_sub_segments = n - 1
for i, sub_points in enumerate(decomposeSuperBezierSegment([
pt for pt, _, _, _ in points])):
for i, sub_points in enumerate(
decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
):
new_segment = []
for point in sub_points[:-1]:
new_segment.append((point, False, None, {}))
@ -213,8 +223,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
new_segment.append((sub_points[-1], True, None, {}))
sub_segments.append(new_segment)
else:
raise AssertionError(
"expected 2 control points, found: %d" % n)
raise AssertionError("expected 2 control points, found: %d" % n)
return sub_segments
def _drawPoints(self, segments):
@ -223,13 +232,15 @@ class Cu2QuPointPen(BasePointToSegmentPen):
last_offcurves = []
for i, (segment_type, points) in enumerate(segments):
if segment_type in ("move", "line"):
assert len(points) == 1, (
"illegal line segment point count: %d" % len(points))
assert len(points) == 1, "illegal line segment point count: %d" % len(
points
)
pt, smooth, name, kwargs = points[0]
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
elif segment_type == "qcurve":
assert len(points) >= 2, (
"illegal qcurve segment point count: %d" % len(points))
assert len(points) >= 2, "illegal qcurve segment point count: %d" % len(
points
)
offcurves = points[:-1]
if offcurves:
if i == 0:
@ -249,8 +260,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
else:
# 'curve' segments must have been converted to 'qcurve' by now
raise AssertionError(
"unexpected segment type: %r" % segment_type)
raise AssertionError("unexpected segment type: %r" % segment_type)
for (pt, smooth, name, kwargs) in last_offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs)
pen.endPath()
@ -260,7 +270,6 @@ class Cu2QuPointPen(BasePointToSegmentPen):
self.pen.addComponent(baseGlyphName, transformation)
class Cu2QuMultiPen:
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
in an interpolation-compatible manner, using the FontTools SegmentPen protocol.
@ -281,7 +290,10 @@ class Cu2QuMultiPen:
def __init__(self, other_pens, max_err, reverse_direction=False):
if reverse_direction:
other_pens = [ReverseContourPen(pen, outputImpliedClosingLine=True) for pen in other_pens]
other_pens = [
ReverseContourPen(pen, outputImpliedClosingLine=True)
for pen in other_pens
]
self.pens = other_pens
self.max_err = max_err
self.start_pts = None
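
A sketch of the pen in isolation, recording the conversion of one cubic segment (max_err=1.0 is an arbitrary tolerance in font units):

from fontTools.pens.cu2quPen import Cu2QuPen
from fontTools.pens.recordingPen import RecordingPen

rec = RecordingPen()
pen = Cu2QuPen(rec, max_err=1.0)
pen.moveTo((0, 0))
pen.curveTo((0, 100), (100, 200), (200, 200))
pen.closePath()
print(rec.value)    # the curveTo comes back as one or more qCurveTo records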

View File

@ -4,7 +4,6 @@ from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object):
def addComponent(self, glyphName, transformation, **kwargs):
self._outPen.addComponent(glyphName, transformation, **kwargs)

View File

@ -65,9 +65,7 @@ class HashPointPen(AbstractPointPen):
pt_type = segmentType[0]
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
def addComponent(
self, baseGlyphName, transformation, identifier=None, **kwargs
):
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
tr = "".join([f"{t:+}" for t in transformation])
self.data.append("[")
try:

View File

@ -1,4 +1,5 @@
from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
except ImportError:
@ -15,8 +16,8 @@ else:
__all__ = ["MomentsPen"]
class MomentsPen(BasePen):
class MomentsPen(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
@ -39,9 +40,7 @@ class MomentsPen(BasePen):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise OpenContourError(
"Green theorem is not defined on open contours."
)
raise OpenContourError("Green theorem is not defined on open contours.")
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@ -78,10 +77,30 @@ class MomentsPen(BasePen):
self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2
self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6
self.momentY += -r0*y1/6 - r8*x1/6 - r9*x1/6 + x0*(r8 + r9 + y0*y1)/6
self.momentXX += -r10*y0/12 - r10*y1/4 - r2*r5/12 - r4*r6*x1/12 + x0**3*(3*y0 + y1)/12
self.momentXY += -r2*r8/24 - r2*r9/8 - r3*r7/24 + r6*(r7*y1 + 3*r8 + r9)/24 - x0*x1*(r8 - r9)/12
self.momentYY += -r0*r9/12 - r1*r8/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r8*y1 + r9*y0)/12
self.momentY += (
-r0 * y1 / 6 - r8 * x1 / 6 - r9 * x1 / 6 + x0 * (r8 + r9 + y0 * y1) / 6
)
self.momentXX += (
-r10 * y0 / 12
- r10 * y1 / 4
- r2 * r5 / 12
- r4 * r6 * x1 / 12
+ x0**3 * (3 * y0 + y1) / 12
)
self.momentXY += (
-r2 * r8 / 24
- r2 * r9 / 8
- r3 * r7 / 24
+ r6 * (r7 * y1 + 3 * r8 + r9) / 24
- x0 * x1 * (r8 - r9) / 12
)
self.momentYY += (
-r0 * r9 / 12
- r1 * r8 / 12
- r11 * x1 / 12
- r12 * x1 / 12
+ x0 * (r11 + r12 + r8 * y1 + r9 * y0) / 12
)
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@ -200,12 +219,99 @@ class MomentsPen(BasePen):
r52 = 10 * y1
r53 = 12 * y1
self.area += -r1/6 - r3/6 + x0*(r0 + r5 + y2)/6 + x1*y2/3 - y0*(r4 + x2)/6
self.momentX += -r11*(-r10 + y1)/30 + r12*(r13 + r8 + y2)/30 + r6*y2/15 - r7*r8/30 - r7*r9/30 + x0*(r14 - r15 - r16*y0 + r17)/30 - y0*(r11 + 2*r6 + r7)/30
self.momentY += -r18/30 - r20*x2/30 - r23/30 - r24*(r16 + x2)/30 + x0*(r0*y2 + r20 + r21 + r25 + r26 + r8*y0)/30 + x1*y2*(r10 + y1)/15 - y0*(r1 + r17)/30
self.momentXX += r12*(r1 - 5*r15 - r34*y0 + r36 + r9*x1)/420 + 2*r27*y2/105 - r28*r29/420 - r28*y2/4 - r31*(r0 - 3*y2)/420 - r6*x2*(r0 - r32)/105 + x0**3*(r30 + 21*y0 + y2)/84 - x0*(r0*r7 + r15*r37 - r2*r37 - r33*y2 + r38*y0 - r39 - r40 + r5*r7)/420 - y0*(8*r27 + 5*r28 + r31 + r33*x2)/420
self.momentXY += r12*(r13*y2 + 3*r21 + 105*r24 + r41*y0 + r42 + r46*y1)/840 - r16*x2*(r43 - r44)/840 - r21*r7/8 - r24*(r38 + r45*x1 + 3*r7)/840 - r41*r7*y2/840 - r42*r7/840 + r6*y2*(r32 + r8)/210 + x0*(-r15*r8 + r16*r25 + r18 + r21*r47 - r24*r34 - r26*x2 + r35*r46 + r48)/420 - y0*(r16*r2 + r30*r7 + r35*r45 + r39 + r40)/420
self.momentYY += -r2*r42/420 - r22*r29/420 - r24*(r14 + r36 + r52*x2)/420 - r49*x2/420 - r50*x2/12 - r51*(r47 + x2)/84 + x0*(r19*r46 + r21*r5 + r21*r52 + r24*r29 + r25*r53 + r26*y2 + r42*y0 + r49 + 5*r50 + 35*r51)/420 + x1*y2*(r43 + r44 + r9*y1)/210 - y0*(r19*r45 + r2*r53 - r21*r4 + r48)/420
self.area += (
-r1 / 6
- r3 / 6
+ x0 * (r0 + r5 + y2) / 6
+ x1 * y2 / 3
- y0 * (r4 + x2) / 6
)
self.momentX += (
-r11 * (-r10 + y1) / 30
+ r12 * (r13 + r8 + y2) / 30
+ r6 * y2 / 15
- r7 * r8 / 30
- r7 * r9 / 30
+ x0 * (r14 - r15 - r16 * y0 + r17) / 30
- y0 * (r11 + 2 * r6 + r7) / 30
)
self.momentY += (
-r18 / 30
- r20 * x2 / 30
- r23 / 30
- r24 * (r16 + x2) / 30
+ x0 * (r0 * y2 + r20 + r21 + r25 + r26 + r8 * y0) / 30
+ x1 * y2 * (r10 + y1) / 15
- y0 * (r1 + r17) / 30
)
self.momentXX += (
r12 * (r1 - 5 * r15 - r34 * y0 + r36 + r9 * x1) / 420
+ 2 * r27 * y2 / 105
- r28 * r29 / 420
- r28 * y2 / 4
- r31 * (r0 - 3 * y2) / 420
- r6 * x2 * (r0 - r32) / 105
+ x0**3 * (r30 + 21 * y0 + y2) / 84
- x0
* (
r0 * r7
+ r15 * r37
- r2 * r37
- r33 * y2
+ r38 * y0
- r39
- r40
+ r5 * r7
)
/ 420
- y0 * (8 * r27 + 5 * r28 + r31 + r33 * x2) / 420
)
self.momentXY += (
r12 * (r13 * y2 + 3 * r21 + 105 * r24 + r41 * y0 + r42 + r46 * y1) / 840
- r16 * x2 * (r43 - r44) / 840
- r21 * r7 / 8
- r24 * (r38 + r45 * x1 + 3 * r7) / 840
- r41 * r7 * y2 / 840
- r42 * r7 / 840
+ r6 * y2 * (r32 + r8) / 210
+ x0
* (
-r15 * r8
+ r16 * r25
+ r18
+ r21 * r47
- r24 * r34
- r26 * x2
+ r35 * r46
+ r48
)
/ 420
- y0 * (r16 * r2 + r30 * r7 + r35 * r45 + r39 + r40) / 420
)
self.momentYY += (
-r2 * r42 / 420
- r22 * r29 / 420
- r24 * (r14 + r36 + r52 * x2) / 420
- r49 * x2 / 420
- r50 * x2 / 12
- r51 * (r47 + x2) / 84
+ x0
* (
r19 * r46
+ r21 * r5
+ r21 * r52
+ r24 * r29
+ r25 * r53
+ r26 * y2
+ r42 * y0
+ r49
+ 5 * r50
+ 35 * r51
)
/ 420
+ x1 * y2 * (r43 + r44 + r9 * y1) / 210
- y0 * (r19 * r45 + r2 * r53 - r21 * r4 + r48) / 420
)
@cython.locals(r0=cython.double)
@cython.locals(r1=cython.double)
@ -484,20 +590,296 @@ class MomentsPen(BasePen):
r131 = 189 * r53
r132 = 90 * y2
self.area += -r1/20 - r3/20 - r4*(x2 + x3)/20 + x0*(r7 + r8 + 10*y0 + y3)/20 + 3*x1*(y2 + y3)/20 + 3*x2*y3/10 - y0*(r5 + r6 + x3)/20
self.momentX += r11/840 - r13/8 - r14/3 - r17*(-r15 + r8)/840 + r19*(r8 + 2*y3)/840 + r20*(r0 + r21 + 56*y0 + y3)/168 + r29*(-r23 + r25 + r28)/840 - r4*(10*r12 + r17 + r22)/840 + x0*(12*r27 + r30*y2 + r34 - r35*x1 - r37 - r38*y0 + r39*x1 - r4*x3 + r45)/840 - y0*(r17 + r30*x2 + r31*x1 + r32 + r33 + 18*r9)/840
self.momentY += -r4*(r25 + r58)/840 - r47/8 - r50/840 - r52/6 - r54*(r6 + 2*x3)/840 - r55*(r56 + r57 + x3)/168 + x0*(r35*y1 + r40*y0 + r44*y2 + 18*r48 + 140*r55 + r59 + r63 + 12*r64 + r65 + r66)/840 + x1*(r24*y1 + 10*r51 + r59 + r60 + r7*y3)/280 + x2*y3*(r15 + r8)/56 - y0*(r16*y1 + r31*y2 + r44*x2 + r45 + r61 - r62*x1)/840
self.momentXX += -r12*r72*(-r40 + r8)/9240 + 3*r18*(r28 + r34 - r38*y1 + r75)/3080 + r20*(r24*x3 - r72*y0 - r76*y0 - r77*y0 + r78 + r79*y3 + r80*y1 + 210*r81 + r84)/9240 - r29*(r12*r21 + 14*r13 + r44*r9 - r73*y3 + 54*r86 - 84*r87 - r89 - r90)/9240 - r4*(70*r12*x2 + 27*r67 + 42*r68 + r74)/9240 + 3*r67*y3/220 - r68*r69/9240 - r68*y3/4 - r70*r9*(-r62 + y2)/9240 + 3*r71*(r24 + r40)/3080 + x0**3*(r24 + r44 + 165*y0 + y3)/660 + x0*(r100*r27 + 162*r101 + r102 + r11 + 63*r18*y3 + r27*r91 - r33*y0 - r37*x3 + r43*x3 - r73*y0 - r88*y1 + r92*y2 - r93*y0 - 9*r94 - r95*y0 - r96*y0 - r97*y1 - 18*r98 + r99*x1*y3)/9240 - y0*(r12*r56 + r12*r80 + r32*x3 + 45*r67 + 14*r68 + 126*r71 + r74 + r85*r91 + 135*r9*x1 + r92*x2)/9240
self.momentXY += -r103*r12/18480 - r12*r51/8 - 3*r14*y2/44 + 3*r18*(r105 + r2*y1 + 18*r46 + 15*r48 + 7*r51)/6160 + r20*(1260*r106 + r107*y1 + r108 + 28*r109 + r110 + r111 + r112 + 30*r46 + 2310*r55 + r66)/18480 - r54*(7*r12 + 18*r85 + 15*r9)/18480 - r55*(r33 + r73 + r93 + r95 + r96 + r97)/18480 - r7*(42*r13 + r82*x3 + 28*r87 + r89 + r90)/18480 - 3*r85*(r48 - r66)/220 + 3*r9*y3*(r62 + 2*y2)/440 + x0*(-r1*y0 - 84*r106*x2 + r109*r56 + 54*r114 + r117*y1 + 15*r118 + 21*r119 + 81*r120 + r121*r46 + 54*r122 + 60*r123 + r124 - r21*x3*y0 + r23*y3 - r54*x3 - r55*r72 - r55*r76 - r55*r77 + r57*y0*y3 + r60*x3 + 84*r81*y0 + 189*r81*y1)/9240 + x1*(r104*r27 - r105*x3 - r113*r53 + 63*r114 + r115 - r16*r53 + 28*r47 + r51*r80)/3080 - y0*(54*r101 + r102 + r116*r5 + r117*x3 + 21*r13 - r19*y3 + r22*y3 + r78*x3 + 189*r83*x2 + 60*r86 + 81*r9*y1 + 15*r94 + 54*r98)/9240
self.momentYY += -r103*r116/9240 - r125*r70/9240 - r126*x3/12 - 3*r127*(r26 + r38)/3080 - r128*(r26 + r30 + x3)/660 - r4*(r112*x3 + r115 - 14*r119 + 84*r47)/9240 - r52*r69/9240 - r54*(r58 + r61 + r75)/9240 - r55*(r100*y1 + r121*y2 + r26*y3 + r79*y2 + r84 + 210*x2*y1)/9240 + x0*(r108*y1 + r110*y0 + r111*y0 + r112*y0 + 45*r125 + 14*r126 + 126*r127 + 770*r128 + 42*r129 + r130 + r131*y2 + r132*r64 + 135*r48*y1 + 630*r55*y1 + 126*r55*y2 + 14*r55*y3 + r63*y3 + r65*y3 + r66*y0)/9240 + x1*(27*r125 + 42*r126 + 70*r129 + r130 + r39*r53 + r44*r48 + 27*r53*y2 + 54*r64*y2)/3080 + 3*x2*y3*(r48 + r66 + r8*y3)/220 - y0*(r100*r46 + 18*r114 - 9*r118 - 27*r120 - 18*r122 - 30*r123 + r124 + r131*x2 + r132*x3*y1 + 162*r42*y1 + r50 + 63*r53*x3 + r64*r99)/9240
self.area += (
-r1 / 20
- r3 / 20
- r4 * (x2 + x3) / 20
+ x0 * (r7 + r8 + 10 * y0 + y3) / 20
+ 3 * x1 * (y2 + y3) / 20
+ 3 * x2 * y3 / 10
- y0 * (r5 + r6 + x3) / 20
)
self.momentX += (
r11 / 840
- r13 / 8
- r14 / 3
- r17 * (-r15 + r8) / 840
+ r19 * (r8 + 2 * y3) / 840
+ r20 * (r0 + r21 + 56 * y0 + y3) / 168
+ r29 * (-r23 + r25 + r28) / 840
- r4 * (10 * r12 + r17 + r22) / 840
+ x0
* (
12 * r27
+ r30 * y2
+ r34
- r35 * x1
- r37
- r38 * y0
+ r39 * x1
- r4 * x3
+ r45
)
/ 840
- y0 * (r17 + r30 * x2 + r31 * x1 + r32 + r33 + 18 * r9) / 840
)
self.momentY += (
-r4 * (r25 + r58) / 840
- r47 / 8
- r50 / 840
- r52 / 6
- r54 * (r6 + 2 * x3) / 840
- r55 * (r56 + r57 + x3) / 168
+ x0
* (
r35 * y1
+ r40 * y0
+ r44 * y2
+ 18 * r48
+ 140 * r55
+ r59
+ r63
+ 12 * r64
+ r65
+ r66
)
/ 840
+ x1 * (r24 * y1 + 10 * r51 + r59 + r60 + r7 * y3) / 280
+ x2 * y3 * (r15 + r8) / 56
- y0 * (r16 * y1 + r31 * y2 + r44 * x2 + r45 + r61 - r62 * x1) / 840
)
self.momentXX += (
-r12 * r72 * (-r40 + r8) / 9240
+ 3 * r18 * (r28 + r34 - r38 * y1 + r75) / 3080
+ r20
* (
r24 * x3
- r72 * y0
- r76 * y0
- r77 * y0
+ r78
+ r79 * y3
+ r80 * y1
+ 210 * r81
+ r84
)
/ 9240
- r29
* (
r12 * r21
+ 14 * r13
+ r44 * r9
- r73 * y3
+ 54 * r86
- 84 * r87
- r89
- r90
)
/ 9240
- r4 * (70 * r12 * x2 + 27 * r67 + 42 * r68 + r74) / 9240
+ 3 * r67 * y3 / 220
- r68 * r69 / 9240
- r68 * y3 / 4
- r70 * r9 * (-r62 + y2) / 9240
+ 3 * r71 * (r24 + r40) / 3080
+ x0**3 * (r24 + r44 + 165 * y0 + y3) / 660
+ x0
* (
r100 * r27
+ 162 * r101
+ r102
+ r11
+ 63 * r18 * y3
+ r27 * r91
- r33 * y0
- r37 * x3
+ r43 * x3
- r73 * y0
- r88 * y1
+ r92 * y2
- r93 * y0
- 9 * r94
- r95 * y0
- r96 * y0
- r97 * y1
- 18 * r98
+ r99 * x1 * y3
)
/ 9240
- y0
* (
r12 * r56
+ r12 * r80
+ r32 * x3
+ 45 * r67
+ 14 * r68
+ 126 * r71
+ r74
+ r85 * r91
+ 135 * r9 * x1
+ r92 * x2
)
/ 9240
)
self.momentXY += (
-r103 * r12 / 18480
- r12 * r51 / 8
- 3 * r14 * y2 / 44
+ 3 * r18 * (r105 + r2 * y1 + 18 * r46 + 15 * r48 + 7 * r51) / 6160
+ r20
* (
1260 * r106
+ r107 * y1
+ r108
+ 28 * r109
+ r110
+ r111
+ r112
+ 30 * r46
+ 2310 * r55
+ r66
)
/ 18480
- r54 * (7 * r12 + 18 * r85 + 15 * r9) / 18480
- r55 * (r33 + r73 + r93 + r95 + r96 + r97) / 18480
- r7 * (42 * r13 + r82 * x3 + 28 * r87 + r89 + r90) / 18480
- 3 * r85 * (r48 - r66) / 220
+ 3 * r9 * y3 * (r62 + 2 * y2) / 440
+ x0
* (
-r1 * y0
- 84 * r106 * x2
+ r109 * r56
+ 54 * r114
+ r117 * y1
+ 15 * r118
+ 21 * r119
+ 81 * r120
+ r121 * r46
+ 54 * r122
+ 60 * r123
+ r124
- r21 * x3 * y0
+ r23 * y3
- r54 * x3
- r55 * r72
- r55 * r76
- r55 * r77
+ r57 * y0 * y3
+ r60 * x3
+ 84 * r81 * y0
+ 189 * r81 * y1
)
/ 9240
+ x1
* (
r104 * r27
- r105 * x3
- r113 * r53
+ 63 * r114
+ r115
- r16 * r53
+ 28 * r47
+ r51 * r80
)
/ 3080
- y0
* (
54 * r101
+ r102
+ r116 * r5
+ r117 * x3
+ 21 * r13
- r19 * y3
+ r22 * y3
+ r78 * x3
+ 189 * r83 * x2
+ 60 * r86
+ 81 * r9 * y1
+ 15 * r94
+ 54 * r98
)
/ 9240
)
self.momentYY += (
-r103 * r116 / 9240
- r125 * r70 / 9240
- r126 * x3 / 12
- 3 * r127 * (r26 + r38) / 3080
- r128 * (r26 + r30 + x3) / 660
- r4 * (r112 * x3 + r115 - 14 * r119 + 84 * r47) / 9240
- r52 * r69 / 9240
- r54 * (r58 + r61 + r75) / 9240
- r55
* (r100 * y1 + r121 * y2 + r26 * y3 + r79 * y2 + r84 + 210 * x2 * y1)
/ 9240
+ x0
* (
r108 * y1
+ r110 * y0
+ r111 * y0
+ r112 * y0
+ 45 * r125
+ 14 * r126
+ 126 * r127
+ 770 * r128
+ 42 * r129
+ r130
+ r131 * y2
+ r132 * r64
+ 135 * r48 * y1
+ 630 * r55 * y1
+ 126 * r55 * y2
+ 14 * r55 * y3
+ r63 * y3
+ r65 * y3
+ r66 * y0
)
/ 9240
+ x1
* (
27 * r125
+ 42 * r126
+ 70 * r129
+ r130
+ r39 * r53
+ r44 * r48
+ 27 * r53 * y2
+ 54 * r64 * y2
)
/ 3080
+ 3 * x2 * y3 * (r48 + r66 + r8 * y3) / 220
- y0
* (
r100 * r46
+ 18 * r114
- 9 * r118
- 27 * r120
- 18 * r122
- 30 * r123
+ r124
+ r131 * x2
+ r132 * x3 * y1
+ 162 * r42 * y1
+ r50
+ 63 * r53 * x3
+ r64 * r99
)
/ 9240
)
if __name__ == '__main__':
if __name__ == "__main__":
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('MomentsPen', [
('area', 1),
('momentX', x),
('momentY', y),
('momentXX', x**2),
('momentXY', x*y),
('momentYY', y**2),
])
printGreenPen(
"MomentsPen",
[
("area", 1),
("momentX", x),
("momentY", y),
("momentXX", x**2),
("momentXY", x * y),
("momentYY", y**2),
],
)
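
As a sanity check of the generated code above: on a counter-clockwise unit square the pen should report an area of about 1 and first moments of about 0.5 each, since the centroid of the square sits at (0.5, 0.5).

from fontTools.pens.momentsPen import MomentsPen

pen = MomentsPen()
pen.moveTo((0, 0))
pen.lineTo((1, 0))
pen.lineTo((1, 1))
pen.lineTo((0, 1))
pen.closePath()
print(pen.area, pen.momentX, pen.momentY)    # approximately 1.0 0.5 0.5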

View File

@ -2,7 +2,12 @@
"""Calculate the perimeter of a glyph."""
from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC
from fontTools.misc.bezierTools import (
approximateQuadraticArcLengthC,
calcQuadraticArcLengthC,
approximateCubicArcLengthC,
calcCubicArcLengthC,
)
import math
@ -12,8 +17,8 @@ __all__ = ["PerimeterPen"]
def _distance(p0, p1):
return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
class PerimeterPen(BasePen):
class PerimeterPen(BasePen):
def __init__(self, glyphset=None, tolerance=0.005):
BasePen.__init__(self, glyphset)
self.value = 0
@ -22,8 +27,14 @@ class PerimeterPen(BasePen):
# Choose which algorithm to use for quadratic and for cubic.
# Quadrature is faster but has fixed error characteristic with no strong
# error bound. The cutoff points are derived empirically.
self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact
self._addCubic = (
self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
)
self._addQuadratic = (
self._addQuadraticQuadrature
if tolerance >= 0.00075
else self._addQuadraticExact
)
def _moveTo(self, p0):
self.__startPoint = p0
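
Sketch: for straight segments the tolerance is irrelevant, so a 100-unit square must come out as exactly 400.

from fontTools.pens.perimeterPen import PerimeterPen

pen = PerimeterPen(tolerance=0.001)
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.lineTo((0, 100))
pen.closePath()
print(pen.value)    # 400.0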

View File

@ -119,7 +119,7 @@ class PointInsidePen(BasePen):
by = (y3 - y2) * 3.0 - cy
ay = y4 - dy - cy - by
solutions = sorted(solveCubic(ay, by, cy, dy - y))
solutions = [t for t in solutions if -0. <= t <= 1.]
solutions = [t for t in solutions if -0.0 <= t <= 1.0]
if not solutions:
return
@ -175,7 +175,9 @@ class PointInsidePen(BasePen):
b = (y2 - c) * 2.0
a = y3 - c - b
solutions = sorted(solveQuadratic(a, b, c - y))
solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
solutions = [
t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
]
if not solutions:
return
# XXX
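
Usage sketch of the pen above on a triangle; the test point goes into the constructor and getResult() answers for it (evenOdd selects the even-odd fill rule instead of the default nonzero winding rule).

from fontTools.pens.pointInsidePen import PointInsidePen

def contains(point, evenOdd=False):
    pen = PointInsidePen(None, point, evenOdd)
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.lineTo((50, 100))
    pen.closePath()
    return pen.getResult()

print(contains((50, 30)))   # True: inside the triangle
print(contains((5, 90)))    # False: outside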

View File

@ -45,7 +45,7 @@ class AbstractPointPen:
smooth: bool = False,
name: Optional[str] = None,
identifier: Optional[str] = None,
**kwargs: Any
**kwargs: Any,
) -> None:
"""Add a point to the current sub path."""
raise NotImplementedError
@ -55,7 +55,7 @@ class AbstractPointPen:
baseGlyphName: str,
transformation: Tuple[float, float, float, float, float, float],
identifier: Optional[str] = None,
**kwargs: Any
**kwargs: Any,
) -> None:
"""Add a sub glyph."""
raise NotImplementedError
@ -154,8 +154,9 @@ class BasePointToSegmentPen(AbstractPointPen):
self._flushContour(segments)
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
identifier=None, **kwargs):
def addPoint(
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
):
if self.currentPath is None:
raise PenError("Path not begun")
self.currentPath.append((pt, segmentType, smooth, name, kwargs))
@ -388,8 +389,9 @@ class GuessSmoothPointPen(AbstractPointPen):
self._outPen.endPath()
self._points = None
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
identifier=None, **kwargs):
def addPoint(
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
):
if self._points is None:
raise PenError("Path not begun")
if identifier is not None:
@ -464,7 +466,9 @@ class ReverseContourPointPen(AbstractPointPen):
lastSegmentType = nextSegmentType
else:
segmentType = None
pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs)
pen.addPoint(
pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
)
pen.endPath()
def beginPath(self, identifier=None, **kwargs):
@ -480,7 +484,9 @@ class ReverseContourPointPen(AbstractPointPen):
self._flushContour()
self.currentContour = None
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
def addPoint(
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
):
if self.currentContour is None:
raise PenError("Path not begun")
if identifier is not None:

View File

@ -5,11 +5,11 @@ __all__ = ["QtPen"]
class QtPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from PyQt5.QtGui import QPainterPath
path = QPainterPath()
self.path = path

View File

@ -42,4 +42,3 @@ class QuartzPen(BasePen):
def _closePath(self):
CGPathCloseSubpath(self.path)

View File

@ -48,20 +48,28 @@ class RecordingPen(AbstractPen):
def __init__(self):
self.value = []
def moveTo(self, p0):
self.value.append(('moveTo', (p0,)))
self.value.append(("moveTo", (p0,)))
def lineTo(self, p1):
self.value.append(('lineTo', (p1,)))
self.value.append(("lineTo", (p1,)))
def qCurveTo(self, *points):
self.value.append(('qCurveTo', points))
self.value.append(("qCurveTo", points))
def curveTo(self, *points):
self.value.append(('curveTo', points))
self.value.append(("curveTo", points))
def closePath(self):
self.value.append(('closePath', ()))
self.value.append(("closePath", ()))
def endPath(self):
self.value.append(('endPath', ()))
self.value.append(("endPath", ()))
def addComponent(self, glyphName, transformation):
self.value.append(('addComponent', (glyphName, transformation)))
self.value.append(("addComponent", (glyphName, transformation)))
def replay(self, pen):
replayRecording(self.value, pen)
@ -90,6 +98,7 @@ class DecomposingRecordingPen(DecomposingPen, RecordingPen):
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
"""
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
@ -130,7 +139,9 @@ class RecordingPointPen(AbstractPointPen):
def endPath(self):
self.value.append(("endPath", (), {}))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
def addPoint(
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
@ -152,4 +163,5 @@ if __name__ == "__main__":
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()
from pprint import pprint
pprint(pen.value)

View File

@ -35,11 +35,18 @@ class ReportLabPen(BasePen):
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
print(" If no image file name is created, by default <glyphname>.png is created.")
print(
"Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]"
)
print(
" If no image file name is created, by default <glyphname>.png is created."
)
print(" example: reportLabPen.py Arial.TTF R test.png")
print(" (The file format will be PNG, regardless of the image file name supplied)")
print(
" (The file format will be PNG, regardless of the image file name supplied)"
)
sys.exit(0)
from fontTools.ttLib import TTFont
@ -47,7 +54,7 @@ if __name__=="__main__":
path = sys.argv[1]
glyphName = sys.argv[2]
if (len(sys.argv) > 3):
if len(sys.argv) > 3:
imageFile = sys.argv[3]
else:
imageFile = "%s.png" % glyphName

View File

@ -40,16 +40,14 @@ def reversedContour(contour, outputImpliedClosingLine=False):
firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), (
"invalid initial segment type: %r" % firstType)
"invalid initial segment type: %r" % firstType
)
firstOnCurve = firstPts[-1]
if firstType == "qCurveTo":
# special case for TrueType paths containing only off-curve points
assert firstOnCurve is None, (
"off-curve only paths must end with 'None'")
assert not contour, (
"only one qCurveTo allowed per off-curve path")
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
(None,))
assert firstOnCurve is None, "off-curve only paths must end with 'None'"
assert not contour, "only one qCurveTo allowed per off-curve path"
firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)
if not contour:
# contour contains only one segment, nothing to reverse
@ -67,8 +65,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
if outputImpliedClosingLine or firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,)
contour[-1] = (lastType,
tuple(lastPts[:-1]) + (firstOnCurve,))
contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1:
secondType, secondPts = contour[0]
@ -84,8 +81,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
if secondType == "lineTo" and firstPts != secondPts:
del contour[0]
if contour:
contour[-1] = (lastType,
tuple(lastPts[:-1]) + secondPts)
contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
else:
# for open paths, the last point will become the first
yield firstType, (lastOnCurve,)
@ -94,8 +90,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
# we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment
for (curType, curPts), (_, nextPts) in pairwise(
contour, reverse=True):
for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", ()

View File

@ -53,8 +53,8 @@ class StatisticsPen(MomentsPen):
self.varianceX = varianceX = self.momentXX / area - meanX**2
self.varianceY = varianceY = self.momentYY / area - meanY**2
self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX)
self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY)
self.stddevX = stddevX = math.copysign(abs(varianceX) ** 0.5, varianceX)
self.stddevY = stddevY = math.copysign(abs(varianceY) ** 0.5, varianceY)
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
self.covariance = covariance = self.momentXY / area - meanX * meanY
@ -75,28 +75,48 @@ def _test(glyphset, upem, glyphs):
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Scale
print('upem', upem)
print("upem", upem)
for glyph_name in glyphs:
print()
print("glyph:", glyph_name)
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
transformer = TransformPen(pen, Scale(1./upem))
transformer = TransformPen(pen, Scale(1.0 / upem))
glyph.draw(transformer)
for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']:
for item in [
"area",
"momentX",
"momentY",
"momentXX",
"momentYY",
"momentXY",
"meanX",
"meanY",
"varianceX",
"varianceY",
"stddevX",
"stddevY",
"covariance",
"correlation",
"slant",
]:
print("%s: %g" % (item, getattr(pen, item)))
def main(args):
if not args:
return
filename, glyphs = args[0], args[1:]
from fontTools.ttLib import TTFont
font = TTFont(filename)
if not glyphs:
glyphs = font.getGlyphOrder()
_test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs)
_test(font.getGlyphSet(), font["head"].unitsPerEm, glyphs)
if __name__ == '__main__':
if __name__ == "__main__":
import sys
main(sys.argv[1:])

View File

@ -36,6 +36,7 @@ class SVGPathPen(BasePen):
glyphset[glyphname].draw(pen)
print(tpen.getCommands())
"""
def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
BasePen.__init__(self, glyphSet)
self._commands = []
@ -209,22 +210,25 @@ def main(args=None):
if args is None:
import sys
args = sys.argv[1:]
from fontTools.ttLib import TTFont
import argparse
parser = argparse.ArgumentParser(
"fonttools pens.svgPathPen", description="Generate SVG from text")
"fonttools pens.svgPathPen", description="Generate SVG from text"
)
parser.add_argument("font", metavar="font.ttf", help="Font file.")
parser.add_argument("text", metavar="text", help="Text string.")
parser.add_argument(
"font", metavar="font.ttf", help="Font file.")
parser.add_argument(
"text", metavar="text", help="Text string.")
parser.add_argument(
"--variations", metavar="AXIS=LOC", default='',
"--variations",
metavar="AXIS=LOC",
default="",
help="List of space separated locations. A location consist in "
"the name of a variation axis, followed by '=' and a number. E.g.: "
"wght=700 wdth=80. The default is the location of the base master.")
"wght=700 wdth=80. The default is the location of the base master.",
)
options = parser.parse_args(args)
@ -233,18 +237,18 @@ def main(args=None):
location = {}
for tag_v in options.variations.split():
fields = tag_v.split('=')
fields = tag_v.split("=")
tag = fields[0].strip()
v = int(fields[1])
location[tag] = v
hhea = font['hhea']
hhea = font["hhea"]
ascent, descent = hhea.ascent, hhea.descent
glyphset = font.getGlyphSet(location=location)
cmap = font['cmap'].getBestCmap()
cmap = font["cmap"].getBestCmap()
s = ''
s = ""
width = 0
for u in text:
g = cmap[ord(u)]
@ -254,20 +258,29 @@ def main(args=None):
glyph.draw(pen)
commands = pen.getCommands()
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (width, ascent, commands)
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (
width,
ascent,
commands,
)
width += glyph.width
print('<?xml version="1.0" encoding="UTF-8"?>')
print('<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % (width, ascent-descent))
print(s, end='')
print('</svg>')
print(
'<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">'
% (width, ascent - descent)
)
print(s, end="")
print("</svg>")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
sys.exit(main())
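
Besides the command-line use above, the pen can be driven directly; a sketch (the exact spacing of the returned path string may differ):

from fontTools.pens.svgPathPen import SVGPathPen

pen = SVGPathPen(None)
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((25, 125), (75, 125), (100, 100))
pen.closePath()
print(pen.getCommands())    # roughly "M0 0V100C25 125 75 125 100 100Z"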

View File

@ -32,14 +32,14 @@ class T2CharStringPen(BasePen):
return [pt[0] - p0[0], pt[1] - p0[1]]
def _moveTo(self, pt):
self._commands.append(('rmoveto', self._p(pt)))
self._commands.append(("rmoveto", self._p(pt)))
def _lineTo(self, pt):
self._commands.append(('rlineto', self._p(pt)))
self._commands.append(("rlineto", self._p(pt)))
def _curveToOne(self, pt1, pt2, pt3):
_p = self._p
self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3)))
self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3)))
def _closePath(self):
pass
@ -51,15 +51,18 @@ class T2CharStringPen(BasePen):
commands = self._commands
if optimize:
maxstack = 48 if not self._CFF2 else 513
commands = specializeCommands(commands,
generalizeFirst=False,
maxstack=maxstack)
commands = specializeCommands(
commands, generalizeFirst=False, maxstack=maxstack
)
program = commandsToProgram(commands)
if self._width is not None:
assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString."
assert (
not self._CFF2
), "CFF2 does not allow encoding glyph width in CharString."
program.insert(0, otRound(self._width))
if not self._CFF2:
program.append('endchar')
program.append("endchar")
charString = T2CharString(
program=program, private=private, globalSubrs=globalSubrs)
program=program, private=private, globalSubrs=globalSubrs
)
return charString

View File

@ -14,24 +14,31 @@ class TeePen(AbstractPen):
if len(pens) == 1:
pens = pens[0]
self.pens = pens
def moveTo(self, p0):
for pen in self.pens:
pen.moveTo(p0)
def lineTo(self, p1):
for pen in self.pens:
pen.lineTo(p1)
def qCurveTo(self, *points):
for pen in self.pens:
pen.qCurveTo(*points)
def curveTo(self, *points):
for pen in self.pens:
pen.curveTo(*points)
def closePath(self):
for pen in self.pens:
pen.closePath()
def endPath(self):
for pen in self.pens:
pen.endPath()
def addComponent(self, glyphName, transformation):
for pen in self.pens:
pen.addComponent(glyphName, transformation)
@ -39,6 +46,7 @@ class TeePen(AbstractPen):
if __name__ == "__main__":
from fontTools.pens.basePen import _TestPen
pen = TeePen(_TestPen(), _TestPen())
pen.moveTo((0, 0))
pen.lineTo((0, 100))

View File

@ -18,6 +18,7 @@ class TransformPen(FilterPen):
super(TransformPen, self).__init__(outPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
@ -85,6 +86,7 @@ class TransformPointPen(FilterPointPen):
super().__init__(outPointPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
@ -101,6 +103,7 @@ class TransformPointPen(FilterPointPen):
if __name__ == "__main__":
from fontTools.pens.basePen import _TestPen
pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
pen.moveTo((0, 0))
pen.lineTo((0, 100))

View File

@ -5,11 +5,11 @@ __all__ = ["WxPen"]
class WxPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
import wx
path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
self.path = path

File diff suppressed because it is too large

View File

@ -2,5 +2,5 @@ import sys
from fontTools.subset import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -7,17 +7,15 @@ from fontTools.subset.util import _add_method, _uniq_sort
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
def __init__(self, components, localSubrs, globalSubrs):
psCharStrings.SimpleT2Decompiler.__init__(self,
localSubrs,
globalSubrs)
psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
self.components = components
def op_endchar(self, index):
args = self.popall()
if len(args) >= 4:
from fontTools.encodings.StandardEncoding import StandardEncoding
            # endchar can do seac accent building; the T2 spec says it's deprecated,
# but recent software that shall remain nameless does output it.
adx, ady, bchar, achar = args[-4:]
@ -26,7 +24,8 @@ class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
self.components.add(baseGlyph)
self.components.add(accentGlyph)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def closure_glyphs(self, s):
cff = self.cff
assert len(cff) == 1
@ -48,13 +47,14 @@ def closure_glyphs(self, s):
s.glyphs.update(components)
decompose = components
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
if isCFF2 or ignoreWidth:
# CFF2 charstrings have no widths nor 'endchar' operators
c.setProgram([] if isCFF2 else ['endchar'])
c.setProgram([] if isCFF2 else ["endchar"])
else:
if hasattr(font, 'FDArray') and font.FDArray is not None:
if hasattr(font, "FDArray") and font.FDArray is not None:
private = font.FDArray[fdSelectIndex].Private
else:
private = font.Private
@ -63,11 +63,12 @@ def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
pen = NullPen()
c.draw(pen) # this will set the charstring's width
if c.width != dfltWdX:
c.program = [c.width - nmnlWdX, 'endchar']
c.program = [c.width - nmnlWdX, "endchar"]
else:
c.program = ['endchar']
c.program = ["endchar"]
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def prune_pre_subset(self, font, options):
cff = self.cff
# CFF table must have one font only
@ -87,7 +88,8 @@ def prune_pre_subset(self, font, options):
return True # bool(cff.fontNames)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def subset_glyphs(self, s):
cff = self.cff
for fontname in cff.keys():
@ -98,7 +100,8 @@ def subset_glyphs(self, s):
# Load all glyphs
for g in font.charset:
if g not in glyphs: continue
if g not in glyphs:
continue
c, _ = cs.getItemAndSelector(g)
if cs.charStringsAreIndexed:
@ -117,31 +120,31 @@ def subset_glyphs(self, s):
newCharStrings[g] = indicesIdx
cs.charStrings = newCharStrings
else:
cs.charStrings = {g:v
for g,v in cs.charStrings.items()
if g in glyphs}
cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
font.charset = [g for g in font.charset if g in glyphs]
font.numGlyphs = len(font.charset)
if s.options.retain_gids:
isCFF2 = cff.major > 1
for g in s.glyphs_emptied:
_empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
p = self.program
for i in range(1, len(p)):
if p[i] == 'callsubr':
if p[i] == "callsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
elif p[i] == 'callgsubr':
elif p[i] == "callgsubr":
assert isinstance(p[i - 1], int)
p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
p[i - 1] = (
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
)
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
@ -157,19 +160,21 @@ def drop_hints(self):
self.program = self.program[hints.last_hint :]
if not self.program:
# TODO CFF2 no need for endchar.
self.program.append('endchar')
if hasattr(self, 'width'):
self.program.append("endchar")
if hasattr(self, "width"):
# Insert width back if needed
if self.width != self.private.defaultWidthX:
# For CFF2 charstrings, this should never happen
assert self.private.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
assert (
self.private.defaultWidthX is not None
), "CFF2 CharStrings must not have an initial width value"
self.program.insert(0, self.width - self.private.nominalWidthX)
if hints.has_hintmask:
i = 0
p = self.program
while i < len(p):
if p[i] in ['hintmask', 'cntrmask']:
if p[i] in ["hintmask", "cntrmask"]:
assert i + 1 <= len(p)
del p[i : i + 2]
continue
@ -179,13 +184,12 @@ def drop_hints(self):
del self._hints
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
def __init__(self, localSubrs, globalSubrs, private):
psCharStrings.SimpleT2Decompiler.__init__(self,
localSubrs,
globalSubrs,
private)
psCharStrings.SimpleT2Decompiler.__init__(
self, localSubrs, globalSubrs, private
)
for subrs in [localSubrs, globalSubrs]:
if subrs and not hasattr(subrs, "_used"):
subrs._used = set()
@ -198,8 +202,8 @@ class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
class Hints(object):
def __init__(self):
# Whether calling this charstring produces any hint stems
@ -223,16 +227,20 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
self.has_hintmask = False
# List of indices of calls to empty subroutines to remove.
self.deletions = []
pass
def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
def __init__(
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
):
self._css = css
psCharStrings.T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
)
self.private = private
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, '_hints') else None
old_hints = charString._hints if hasattr(charString, "_hints") else None
charString._hints = self.Hints()
psCharStrings.T2WidthExtractor.execute(self, charString)
@ -268,19 +276,24 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
def op_hstem(self, index):
psCharStrings.T2WidthExtractor.op_hstem(self, index)
self.processHint(index)
def op_vstem(self, index):
psCharStrings.T2WidthExtractor.op_vstem(self, index)
self.processHint(index)
def op_hstemhm(self, index):
psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
self.processHint(index)
def op_vstemhm(self, index):
psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
self.processHint(index)
def op_hintmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
self.processHintmask(index)
return rv
def op_cntrmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
self.processHintmask(index)
@ -340,7 +353,7 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
hints.status = max(hints.status, subr_hints.status)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def prune_post_subset(self, ttfFont, options):
cff = self.cff
for fontname in cff.keys():
@ -369,19 +382,21 @@ def prune_post_subset(self, ttfFont, options):
def _delete_empty_subrs(private_dict):
if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
if 'Subrs' in private_dict.rawDict:
del private_dict.rawDict['Subrs']
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
if "Subrs" in private_dict.rawDict:
del private_dict.rawDict["Subrs"]
del private_dict.Subrs
@deprecateFunction("use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning)
@_add_method(ttLib.getTableClass('CFF '))
@deprecateFunction(
"use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
)
@_add_method(ttLib.getTableClass("CFF "))
def desubroutinize(self):
self.cff.desubroutinize()
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def remove_hints(self):
cff = self.cff
for fontname in cff.keys():
@ -407,10 +422,14 @@ def remove_hints(self):
c, _ = cs.getItemAndSelector(g)
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
decompiler = _DehintingT2Decompiler(
css,
subrs,
c.globalSubrs,
c.private.nominalWidthX,
c.private.defaultWidthX,
c.private)
c.private,
)
decompiler.execute(c)
c.width = decompiler.width
for charstring in css:
@ -419,22 +438,33 @@ def remove_hints(self):
# Drop font-wide hinting values
all_privs = []
if hasattr(font, 'FDArray'):
if hasattr(font, "FDArray"):
all_privs.extend(fd.Private for fd in font.FDArray)
else:
all_privs.append(font.Private)
for priv in all_privs:
for k in ['BlueValues', 'OtherBlues',
'FamilyBlues', 'FamilyOtherBlues',
'BlueScale', 'BlueShift', 'BlueFuzz',
'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
for k in [
"BlueValues",
"OtherBlues",
"FamilyBlues",
"FamilyOtherBlues",
"BlueScale",
"BlueShift",
"BlueFuzz",
"StemSnapH",
"StemSnapV",
"StdHW",
"StdVW",
"ForceBold",
"LanguageGroup",
"ExpansionFactor",
]:
if hasattr(priv, k):
setattr(priv, k, None)
self.remove_unused_subroutines()
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def remove_unused_subroutines(self):
cff = self.cff
for fontname in cff.keys():
@ -450,16 +480,20 @@ def remove_unused_subroutines(self):
decompiler.execute(c)
all_subrs = [font.GlobalSubrs]
if hasattr(font, 'FDArray'):
all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
if hasattr(font, "FDArray"):
all_subrs.extend(
fd.Private.Subrs
for fd in font.FDArray
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
)
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
all_subrs.append(font.Private.Subrs)
subrs = set(subrs) # Remove duplicates
# Prepare
for subrs in all_subrs:
if not hasattr(subrs, '_used'):
if not hasattr(subrs, "_used"):
subrs._used = set()
subrs._used = _uniq_sort(subrs._used)
subrs._old_bias = psCharStrings.calcSubrBias(subrs)
@ -474,7 +508,7 @@ def remove_unused_subroutines(self):
# Renumber subroutines themselves
for subrs in all_subrs:
if subrs == font.GlobalSubrs:
if not hasattr(font, 'FDArray') and hasattr(font.Private, 'Subrs'):
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
local_subrs = font.Private.Subrs
else:
local_subrs = []
@ -482,16 +516,16 @@ def remove_unused_subroutines(self):
local_subrs = subrs
subrs.items = [subrs.items[i] for i in subrs._used]
if hasattr(subrs, 'file'):
if hasattr(subrs, "file"):
del subrs.file
if hasattr(subrs, 'offsets'):
if hasattr(subrs, "offsets"):
del subrs.offsets
for subr in subrs.items:
subr.subset_subroutines(local_subrs, font.GlobalSubrs)
# Delete local SubrsIndex if empty
if hasattr(font, 'FDArray'):
if hasattr(font, "FDArray"):
for fd in font.FDArray:
_delete_empty_subrs(fd.Private)
else:

View File

@ -19,7 +19,6 @@ def _map_point(matrix, pt):
class EllipticalArc(object):
def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
self.current_point = current_point
self.rx = rx

View File

@ -11,9 +11,9 @@ from .arc import EllipticalArc
import re
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
COMMANDS = set("MmZzLlHhVvCcSsQqTtAa")
ARC_COMMANDS = set("Aa")
UPPERCASE = set('MZLHVCSQTA')
UPPERCASE = set("MZLHVCSQTA")
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
@ -136,11 +136,13 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
raise ValueError(
"Unallowed implicit command in %s, position %s"
% (pathdef, len(pathdef.split()) - len(elements))
)
last_command = command # Used by S and T
if command == 'M':
if command == "M":
# Moveto command.
x = elements.pop()
y = elements.pop()
@ -164,9 +166,9 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
command = "L"
elif command == 'Z':
elif command == "Z":
# Close path
if current_pos != start_pos:
pen.lineTo((start_pos.real, start_pos.imag))
@ -175,7 +177,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
start_pos = None
command = None # You can't have implicit commands after closing.
elif command == 'L':
elif command == "L":
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
@ -184,7 +186,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'H':
elif command == "H":
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
@ -192,7 +194,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'V':
elif command == "V":
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
@ -200,7 +202,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'C':
elif command == "C":
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@ -210,17 +212,19 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
pen.curveTo((control1.real, control1.imag),
pen.curveTo(
(control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag))
(end.real, end.imag),
)
current_pos = end
last_control = control2
elif command == 'S':
elif command == "S":
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
if last_command not in "CS":
# If there is no previous command or if the previous command
                # was not a C, c, S or s, assume the first control point is
# coincident with the current point.
@ -238,13 +242,15 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
pen.curveTo((control1.real, control1.imag),
pen.curveTo(
(control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag))
(end.real, end.imag),
)
current_pos = end
last_control = control2
elif command == 'Q':
elif command == "Q":
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@ -256,11 +262,11 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
elif command == 'T':
elif command == "T":
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
if last_command not in "QT":
# If there is no previous command or if the previous command
                # was not a Q, q, T or t, assume the first control point is
# coincident with the current point.
@ -280,7 +286,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
elif command == 'A':
elif command == "A":
rx = abs(float(elements.pop()))
ry = abs(float(elements.pop()))
rotation = float(elements.pop())

View File

@ -5,18 +5,18 @@ def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
return arg
return 0.
return 0.0
def _ntos(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
return ('%.3f' % n).rstrip('0').rstrip('.')
return ("%.3f" % n).rstrip("0").rstrip(".")
def _strip_xml_ns(tag):
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
return tag.split('}', 1)[1] if '}' in tag else tag
return tag.split("}", 1)[1] if "}" in tag else tag
def _transform(raw_value):
@ -24,12 +24,12 @@ def _transform(raw_value):
# No other transform functions are supported at the moment.
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
# start simple: if you aren't exactly matrix(...) then no love
match = re.match(r'matrix\((.*)\)', raw_value)
match = re.match(r"matrix\((.*)\)", raw_value)
if not match:
raise NotImplementedError
matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
if len(matrix) != 6:
raise ValueError('wrong # of terms in %s' % raw_value)
raise ValueError("wrong # of terms in %s" % raw_value)
return matrix
@ -38,81 +38,83 @@ class PathBuilder(object):
self.paths = []
self.transforms = []
def _start_path(self, initial_path=''):
def _start_path(self, initial_path=""):
self.paths.append(initial_path)
self.transforms.append(None)
def _end_path(self):
self._add('z')
self._add("z")
def _add(self, path_snippet):
path = self.paths[-1]
if path:
path += ' ' + path_snippet
path += " " + path_snippet
else:
path = path_snippet
self.paths[-1] = path
def _move(self, c, x, y):
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def M(self, x, y):
self._move('M', x, y)
self._move("M", x, y)
def m(self, x, y):
self._move('m', x, y)
self._move("m", x, y)
def _arc(self, c, rx, ry, x, y, large_arc):
self._add('%s%s,%s 0 %d 1 %s,%s' % (c, _ntos(rx), _ntos(ry), large_arc,
_ntos(x), _ntos(y)))
self._add(
"%s%s,%s 0 %d 1 %s,%s"
% (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
)
def A(self, rx, ry, x, y, large_arc=0):
self._arc('A', rx, ry, x, y, large_arc)
self._arc("A", rx, ry, x, y, large_arc)
def a(self, rx, ry, x, y, large_arc=0):
self._arc('a', rx, ry, x, y, large_arc)
self._arc("a", rx, ry, x, y, large_arc)
def _vhline(self, c, x):
self._add('%s%s' % (c, _ntos(x)))
self._add("%s%s" % (c, _ntos(x)))
def H(self, x):
self._vhline('H', x)
self._vhline("H", x)
def h(self, x):
self._vhline('h', x)
self._vhline("h", x)
def V(self, y):
self._vhline('V', y)
self._vhline("V", y)
def v(self, y):
self._vhline('v', y)
self._vhline("v", y)
def _line(self, c, x, y):
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def L(self, x, y):
self._line('L', x, y)
self._line("L", x, y)
def l(self, x, y):
self._line('l', x, y)
self._line("l", x, y)
def _parse_line(self, line):
x1 = float(line.attrib.get('x1', 0))
y1 = float(line.attrib.get('y1', 0))
x2 = float(line.attrib.get('x2', 0))
y2 = float(line.attrib.get('y2', 0))
x1 = float(line.attrib.get("x1", 0))
y1 = float(line.attrib.get("y1", 0))
x2 = float(line.attrib.get("x2", 0))
y2 = float(line.attrib.get("y2", 0))
self._start_path()
self.M(x1, y1)
self.L(x2, y2)
def _parse_rect(self, rect):
x = float(rect.attrib.get('x', 0))
y = float(rect.attrib.get('y', 0))
w = float(rect.attrib.get('width'))
h = float(rect.attrib.get('height'))
rx = float(rect.attrib.get('rx', 0))
ry = float(rect.attrib.get('ry', 0))
x = float(rect.attrib.get("x", 0))
y = float(rect.attrib.get("y", 0))
w = float(rect.attrib.get("width"))
h = float(rect.attrib.get("height"))
rx = float(rect.attrib.get("rx", 0))
ry = float(rect.attrib.get("ry", 0))
rx = _prefer_non_zero(rx, ry)
ry = _prefer_non_zero(ry, rx)
@ -135,22 +137,22 @@ class PathBuilder(object):
self._end_path()
def _parse_path(self, path):
if 'd' in path.attrib:
self._start_path(initial_path=path.attrib['d'])
if "d" in path.attrib:
self._start_path(initial_path=path.attrib["d"])
def _parse_polygon(self, poly):
if 'points' in poly.attrib:
self._start_path('M' + poly.attrib['points'])
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
self._end_path()
def _parse_polyline(self, poly):
if 'points' in poly.attrib:
self._start_path('M' + poly.attrib['points'])
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
def _parse_circle(self, circle):
cx = float(circle.attrib.get('cx', 0))
cy = float(circle.attrib.get('cy', 0))
r = float(circle.attrib.get('r'))
cx = float(circle.attrib.get("cx", 0))
cy = float(circle.attrib.get("cy", 0))
r = float(circle.attrib.get("r"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@ -159,10 +161,10 @@ class PathBuilder(object):
self.A(r, r, cx - r, cy, large_arc=1)
def _parse_ellipse(self, ellipse):
cx = float(ellipse.attrib.get('cx', 0))
cy = float(ellipse.attrib.get('cy', 0))
rx = float(ellipse.attrib.get('rx'))
ry = float(ellipse.attrib.get('ry'))
cx = float(ellipse.attrib.get("cx", 0))
cy = float(ellipse.attrib.get("cy", 0))
rx = float(ellipse.attrib.get("rx"))
ry = float(ellipse.attrib.get("ry"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@ -172,10 +174,10 @@ class PathBuilder(object):
def add_path_from_element(self, el):
tag = _strip_xml_ns(el.tag)
parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
if not callable(parse_fn):
return False
parse_fn(el)
if 'transform' in el.attrib:
self.transforms[-1] = _transform(el.attrib['transform'])
if "transform" in el.attrib:
self.transforms[-1] = _transform(el.attrib["transform"])
return True

View File

@ -19,7 +19,11 @@ import fontTools
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
from fontTools.misc.psOperators import _type1_pre_eexec_order, _type1_fontinfo_order, _type1_post_eexec_order
from fontTools.misc.psOperators import (
_type1_pre_eexec_order,
_type1_fontinfo_order,
_type1_post_eexec_order,
)
from fontTools.encodings.StandardEncoding import StandardEncoding
import os
import re
@ -40,7 +44,8 @@ else:
haveMacSupport = 1
class T1Error(Exception): pass
class T1Error(Exception):
pass
class T1Font(object):
@ -91,6 +96,7 @@ class T1Font(object):
def parse(self):
from fontTools.misc import psLib
from fontTools.misc import psCharStrings
self.font = psLib.suckfont(self.data, self.encoding)
charStrings = self.font["CharStrings"]
lenIV = self.font["Private"].get("lenIV", 4)
@ -98,8 +104,9 @@ class T1Font(object):
subrs = self.font["Private"]["Subrs"]
for glyphName, charString in charStrings.items():
charString, R = eexec.decrypt(charString, 4330)
charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
subrs=subrs)
charStrings[glyphName] = psCharStrings.T1CharString(
charString[lenIV:], subrs=subrs
)
for i in range(len(subrs)):
charString, R = eexec.decrypt(subrs[i], 4330)
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
@ -111,9 +118,13 @@ class T1Font(object):
eexec_began = False
eexec_dict = {}
lines = []
lines.extend([self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
lines.extend(
[
self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
self._tobytes(f"%t1Font: ({fontTools.version})"),
self._tobytes(f"%%BeginResource: font {sf['FontName']}")])
self._tobytes(f"%%BeginResource: font {sf['FontName']}"),
]
)
# follow t1write.c:writeRegNameKeyedFont
size = 3 # Headroom for new key addition
size += 1 # FontMatrix is always counted
@ -149,9 +160,7 @@ class T1Font(object):
for _ in range(8):
lines.append(self._tobytes("0" * 64))
lines.extend([b"cleartomark",
b"%%EndResource",
b"%%EOF"])
lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"])
data = bytesjoin(lines, "\n")
return data
@ -179,35 +188,65 @@ class T1Font(object):
elif not NP_key and subvalue == PD_value:
NP_key = subkey
if subkey == 'OtherSubrs':
if subkey == "OtherSubrs":
# XXX: assert that no flex hint is used
lines.append(self._tobytes(hintothers))
elif subkey == "Subrs":
# XXX: standard Subrs only
lines.append(b"/Subrs 5 array")
for i, subr_bin in enumerate(std_subrs):
encrypted_subr, R = eexec.encrypt(bytesjoin([char_IV, subr_bin]), 4330)
lines.append(bytesjoin([self._tobytes(f"dup {i} {len(encrypted_subr)} {RD_key} "), encrypted_subr, self._tobytes(f" {NP_key}")]))
lines.append(b'def')
encrypted_subr, R = eexec.encrypt(
bytesjoin([char_IV, subr_bin]), 4330
)
lines.append(
bytesjoin(
[
self._tobytes(
f"dup {i} {len(encrypted_subr)} {RD_key} "
),
encrypted_subr,
self._tobytes(f" {NP_key}"),
]
)
)
lines.append(b"def")
lines.append(b"put")
else:
lines.extend(self._make_lines(subkey, subvalue))
elif key == "CharStrings":
lines.append(b"dup /CharStrings")
lines.append(self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin"))
lines.append(
self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin")
)
for glyph_name, char_bin in eexec_dict["CharStrings"].items():
char_bin.compile()
encrypted_char, R = eexec.encrypt(bytesjoin([char_IV, char_bin.bytecode]), 4330)
lines.append(bytesjoin([self._tobytes(f"/{glyph_name} {len(encrypted_char)} {RD_key} "), encrypted_char, self._tobytes(f" {ND_key}")]))
encrypted_char, R = eexec.encrypt(
bytesjoin([char_IV, char_bin.bytecode]), 4330
)
lines.append(
bytesjoin(
[
self._tobytes(
f"/{glyph_name} {len(encrypted_char)} {RD_key} "
),
encrypted_char,
self._tobytes(f" {ND_key}"),
]
)
)
lines.append(b"end put")
else:
lines.extend(self._make_lines(key, value))
lines.extend([b"end",
lines.extend(
[
b"end",
b"dup /FontName get exch definefont pop",
b"mark",
b"currentfile closefile\n"])
b"currentfile closefile\n",
]
)
eexec_portion = bytesjoin(lines, "\n")
encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
@ -250,19 +289,21 @@ class T1Font(object):
# low level T1 data read and write functions
def read(path, onlyHeader=False):
"""reads any Type 1 font file, returns raw data"""
_, ext = os.path.splitext(path)
ext = ext.lower()
creator, typ = getMacCreatorAndType(path)
if typ == 'LWFN':
return readLWFN(path, onlyHeader), 'LWFN'
if ext == '.pfb':
return readPFB(path, onlyHeader), 'PFB'
if typ == "LWFN":
return readLWFN(path, onlyHeader), "LWFN"
if ext == ".pfb":
return readPFB(path, onlyHeader), "PFB"
else:
return readOther(path), 'OTHER'
return readOther(path), "OTHER"
def write(path, data, kind='OTHER', dohex=False):
def write(path, data, kind="OTHER", dohex=False):
assertType1(data)
kind = kind.upper()
try:
@ -271,9 +312,9 @@ def write(path, data, kind='OTHER', dohex=False):
pass
err = 1
try:
if kind == 'LWFN':
if kind == "LWFN":
writeLWFN(path, data)
elif kind == 'PFB':
elif kind == "PFB":
writePFB(path, data)
else:
writeOther(path, data, dohex)
@ -295,13 +336,14 @@ HEXLINELENGTH = 80
def readLWFN(path, onlyHeader=False):
"""reads an LWFN font file, returns raw data"""
from fontTools.misc.macRes import ResourceReader
reader = ResourceReader(path)
try:
data = []
for res in reader.get('POST', []):
for res in reader.get("POST", []):
code = byteord(res.data[0])
if byteord(res.data[1]) != 0:
raise T1Error('corrupt LWFN file')
raise T1Error("corrupt LWFN file")
if code in [1, 2]:
if onlyHeader and code == 2:
break
@ -314,20 +356,21 @@ def readLWFN(path, onlyHeader=False):
elif code == 0:
pass # comment, ignore
else:
raise T1Error('bad chunk code: ' + repr(code))
raise T1Error("bad chunk code: " + repr(code))
finally:
reader.close()
data = bytesjoin(data)
assertType1(data)
return data
def readPFB(path, onlyHeader=False):
"""reads a PFB font file, returns raw data"""
data = []
with open(path, "rb") as f:
while True:
if f.read(1) != bytechr(128):
raise T1Error('corrupt PFB file')
raise T1Error("corrupt PFB file")
code = byteord(f.read(1))
if code in [1, 2]:
chunklen = stringToLong(f.read(4))
@ -337,13 +380,14 @@ def readPFB(path, onlyHeader=False):
elif code == 3:
break
else:
raise T1Error('bad chunk code: ' + repr(code))
raise T1Error("bad chunk code: " + repr(code))
if onlyHeader:
break
data = bytesjoin(data)
assertType1(data)
return data
def readOther(path):
"""reads any (font) file, returns raw data"""
with open(path, "rb") as f:
@ -358,8 +402,10 @@ def readOther(path):
data.append(chunk)
return bytesjoin(data)
# file writing tools
def writeLWFN(path, data):
# Res.FSpCreateResFile was deprecated in OS X 10.5
Res.FSpCreateResFile(path, "just", "LWFN", 0)
@ -374,15 +420,16 @@ def writeLWFN(path, data):
else:
code = 1
while chunk:
res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
res.AddResource('POST', resID, '')
res = Res.Resource(bytechr(code) + "\0" + chunk[: LWFNCHUNKSIZE - 2])
res.AddResource("POST", resID, "")
chunk = chunk[LWFNCHUNKSIZE - 2 :]
resID = resID + 1
res = Res.Resource(bytechr(5) + '\0')
res.AddResource('POST', resID, '')
res = Res.Resource(bytechr(5) + "\0")
res.AddResource("POST", resID, "")
finally:
Res.CloseResFile(resRef)
def writePFB(path, data):
chunks = findEncryptedChunks(data)
with open(path, "wb") as f:
@ -396,6 +443,7 @@ def writePFB(path, data):
f.write(chunk)
f.write(bytechr(128) + bytechr(3))
def writeOther(path, data, dohex=False):
chunks = findEncryptedChunks(data)
with open(path, "wb") as f:
@ -408,7 +456,7 @@ def writeOther(path, data, dohex=False):
if code == 2 and dohex:
while chunk:
f.write(eexec.hexString(chunk[:hexlinelen]))
f.write(b'\r')
f.write(b"\r")
chunk = chunk[hexlinelen:]
else:
f.write(chunk)
@ -419,12 +467,13 @@ def writeOther(path, data, dohex=False):
EEXECBEGIN = b"currentfile eexec"
# The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
# follow eexec
EEXECEND = re.compile(b'(0[ \t\r\n]*){512}', flags=re.M)
EEXECEND = re.compile(b"(0[ \t\r\n]*){512}", flags=re.M)
EEXECINTERNALEND = b"currentfile closefile"
EEXECBEGINMARKER = b"%-- eexec start\r"
EEXECENDMARKER = b"%-- eexec end\r"
_ishexRE = re.compile(b'[0-9A-Fa-f]*$')
_ishexRE = re.compile(b"[0-9A-Fa-f]*$")
def isHex(text):
return _ishexRE.match(text) is not None
@ -439,10 +488,12 @@ def decryptType1(data):
chunk = deHexString(chunk)
decrypted, R = eexec.decrypt(chunk, 55665)
decrypted = decrypted[4:]
if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
if (
decrypted[-len(EEXECINTERNALEND) - 1 : -1] != EEXECINTERNALEND
and decrypted[-len(EEXECINTERNALEND) - 2 : -2] != EEXECINTERNALEND
):
raise T1Error("invalid end of eexec part")
decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
decrypted = decrypted[: -len(EEXECINTERNALEND) - 2] + b"\r"
data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
else:
if chunk[-len(EEXECBEGIN) - 1 : -1] == EEXECBEGIN:
@ -451,6 +502,7 @@ def decryptType1(data):
data.append(chunk)
return bytesjoin(data)
def findEncryptedChunks(data):
chunks = []
while True:
@ -475,16 +527,18 @@ def findEncryptedChunks(data):
chunks.append((0, data))
return chunks
def deHexString(hexstring):
return eexec.deHexString(bytesjoin(hexstring.split()))
# Type 1 assertion
_fontType1RE = re.compile(br"/FontType\s+1\s+def")
_fontType1RE = re.compile(rb"/FontType\s+1\s+def")
def assertType1(data):
for head in [b'%!PS-AdobeFont', b'%!FontType1']:
for head in [b"%!PS-AdobeFont", b"%!FontType1"]:
if data[: len(head)] == head:
break
else:
@ -499,15 +553,17 @@ def assertType1(data):
# pfb helpers
def longToString(long):
s = b""
for i in range(4):
s += bytechr((long & (0xff << (i * 8))) >> i * 8)
s += bytechr((long & (0xFF << (i * 8))) >> i * 8)
return s
def stringToLong(s):
if len(s) != 4:
raise ValueError('string must be 4 bytes long')
raise ValueError("string must be 4 bytes long")
l = 0
for i in range(4):
l += byteord(s[i]) << (i * 8)
@ -523,10 +579,12 @@ font_dictionary_keys.remove("FontMatrix")
FontInfo_dictionary_keys = list(_type1_fontinfo_order)
# extend because AFDKO tx may use following keys
FontInfo_dictionary_keys.extend([
FontInfo_dictionary_keys.extend(
[
"FSType",
"Copyright",
])
]
)
Private_dictionary_keys = [
# We don't know what names will be actually used.
@ -570,7 +628,7 @@ std_subrs = [
# return
b"\x0b",
# 3 1 3 callother pop callsubr return
b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b"
b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b",
]
# follow t1write.c:writeRegNameKeyedFont
eexec_IV = b"cccc"

View File

@ -7,14 +7,22 @@ import sys
log = logging.getLogger(__name__)
class TTLibError(Exception): pass
class TTLibFileIsCollectionError (TTLibError): pass
class TTLibError(Exception):
pass
class TTLibFileIsCollectionError(TTLibError):
pass
@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
import time
print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
from fontTools.ttLib.ttFont import *
from fontTools.ttLib.ttCollection import TTCollection
@ -62,7 +70,7 @@ def main(args=None):
allows for extracting a single font from a
collection, or combining multiple fonts into a
collection.
"""
""",
)
parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
parser.add_argument(
@ -100,5 +108,6 @@ def main(args=None):
collection.fonts = fonts
collection.save(outFile)
if __name__ == "__main__":
sys.exit(main())

View File

@ -7,7 +7,7 @@ def getSFNTResIndices(path):
"""Determine whether a file has a 'sfnt' resource fork or not."""
try:
reader = ResourceReader(path)
indices = reader.getIndices('sfnt')
indices = reader.getIndices("sfnt")
reader.close()
return indices
except ResourceError:
@ -21,6 +21,7 @@ def openTTFonts(path):
font objects as there are sfnt resources in the file.
"""
from fontTools import ttLib
fonts = []
sfnts = getSFNTResIndices(path)
if not sfnts:
@ -39,11 +40,12 @@ class SFNTResourceReader(BytesIO):
def __init__(self, path, res_name_or_index):
from fontTools import ttLib
reader = ResourceReader(path)
if isinstance(res_name_or_index, str):
rsrc = reader.getNamedResource('sfnt', res_name_or_index)
rsrc = reader.getNamedResource("sfnt", res_name_or_index)
else:
rsrc = reader.getIndResource('sfnt', res_name_or_index)
rsrc = reader.getIndResource("sfnt", res_name_or_index)
if rsrc is None:
raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
reader.close()

View File

@ -24,8 +24,8 @@ import logging
log = logging.getLogger(__name__)
class SFNTReader(object):
class SFNTReader(object):
def __new__(cls, *args, **kwargs):
"""Return an instance of the SFNTReader sub-class which is compatible
with the input file type.
@ -38,6 +38,7 @@ class SFNTReader(object):
if sfntVersion == "wOF2":
# return new WOFF2Reader object
from fontTools.ttLib.woff2 import WOFF2Reader
return object.__new__(WOFF2Reader)
# return default object
return object.__new__(cls)
@ -56,7 +57,10 @@ class SFNTReader(object):
header = readTTCHeader(self.file)
numFonts = header.numFonts
if not 0 <= fontNumber < numFonts:
raise TTLibFileIsCollectionError("specify a font number between 0 and %d (inclusive)" % (numFonts - 1))
raise TTLibFileIsCollectionError(
"specify a font number between 0 and %d (inclusive)"
% (numFonts - 1)
)
self.numFonts = numFonts
self.file.seek(header.offsetTable[fontNumber])
data = self.file.read(sfntDirectorySize)
@ -104,9 +108,9 @@ class SFNTReader(object):
entry = self.tables[Tag(tag)]
data = entry.loadData(self.file)
if self.checkChecksums:
if tag == 'head':
if tag == "head":
# Beh: we have to special-case the 'head' table.
checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
else:
checksum = calcChecksum(data)
if self.checkChecksums > 1:
@ -179,36 +183,44 @@ def compress(data, level=ZLIB_COMPRESSION_LEVEL):
The default value is a compromise between speed and compression (6).
"""
if not (0 <= level <= 9):
raise ValueError('Bad compression level: %s' % level)
raise ValueError("Bad compression level: %s" % level)
if not USE_ZOPFLI or level == 0:
from zlib import compress
return compress(data, level)
else:
from zopfli.zlib import compress
return compress(data, numiterations=ZOPFLI_LEVELS[level])
class SFNTWriter(object):
def __new__(cls, *args, **kwargs):
"""Return an instance of the SFNTWriter sub-class which is compatible
with the specified 'flavor'.
"""
flavor = None
if kwargs and 'flavor' in kwargs:
flavor = kwargs['flavor']
if kwargs and "flavor" in kwargs:
flavor = kwargs["flavor"]
elif args and len(args) > 3:
flavor = args[3]
if cls is SFNTWriter:
if flavor == "woff2":
# return new WOFF2Writer object
from fontTools.ttLib.woff2 import WOFF2Writer
return object.__new__(WOFF2Writer)
# return default object
return object.__new__(cls)
def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
flavor=None, flavorData=None):
def __init__(
self,
file,
numTables,
sfntVersion="\000\001\000\000",
flavor=None,
flavorData=None,
):
self.file = file
self.numTables = numTables
self.sfntVersion = Tag(sfntVersion)
@ -223,7 +235,9 @@ class SFNTWriter(object):
self.signature = "wOFF"
# to calculate WOFF checksum adjustment, we also need the original SFNT offsets
self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize
self.origNextTableOffset = (
sfntDirectorySize + numTables * sfntDirectoryEntrySize
)
else:
assert not self.flavor, "Unknown flavor '%s'" % self.flavor
self.directoryFormat = sfntDirectoryFormat
@ -231,14 +245,21 @@ class SFNTWriter(object):
self.DirectoryEntry = SFNTDirectoryEntry
from fontTools.ttLib import getSearchRange
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16)
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
numTables, 16
)
self.directoryOffset = self.file.tell()
self.nextTableOffset = self.directoryOffset + self.directorySize + numTables * self.DirectoryEntry.formatSize
self.nextTableOffset = (
self.directoryOffset
+ self.directorySize
+ numTables * self.DirectoryEntry.formatSize
)
# clear out directory area
self.file.seek(self.nextTableOffset)
# make sure we're actually where we want to be. (old cStringIO bug)
self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
self.tables = OrderedDict()
def setEntry(self, tag, entry):
@ -255,8 +276,8 @@ class SFNTWriter(object):
entry = self.DirectoryEntry()
entry.tag = tag
entry.offset = self.nextTableOffset
if tag == 'head':
entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
if tag == "head":
entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
self.headTable = data
entry.uncompressed = True
else:
@ -272,7 +293,7 @@ class SFNTWriter(object):
# Don't depend on f.seek() as we need to add the padding even if no
# subsequent write follows (seek is lazy), ie. after the final table
# in the font.
self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
assert self.nextTableOffset == self.file.tell()
self.setEntry(tag, entry)
@ -286,7 +307,10 @@ class SFNTWriter(object):
"""
tables = sorted(self.tables.items())
if len(tables) != self.numTables:
raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)))
raise TTLibError(
"wrong number of tables; expected %d, found %d"
% (self.numTables, len(tables))
)
if self.flavor == "woff":
self.signature = b"wOFF"
@ -302,8 +326,10 @@ class SFNTWriter(object):
self.majorVersion = data.majorVersion
self.minorVersion = data.minorVersion
else:
if hasattr(self, 'headTable'):
self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8])
if hasattr(self, "headTable"):
self.majorVersion, self.minorVersion = struct.unpack(
">HH", self.headTable[4:8]
)
else:
self.majorVersion = self.minorVersion = 0
if data.metaData:
@ -319,7 +345,7 @@ class SFNTWriter(object):
self.file.seek(0, 2)
off = self.file.tell()
paddedOff = (off + 3) & ~3
self.file.write('\0' * (paddedOff - off))
self.file.write("\0" * (paddedOff - off))
self.privOffset = self.file.tell()
self.privLength = len(data.privData)
self.file.write(data.privData)
@ -356,7 +382,10 @@ class SFNTWriter(object):
if self.DirectoryEntry != SFNTDirectoryEntry:
# Create a SFNT directory for checksum calculation purposes
from fontTools.ttLib import getSearchRange
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
self.numTables, 16
)
directory = sstruct.pack(sfntDirectoryFormat, self)
tables = sorted(self.tables.items())
for tag, entry in tables:
@ -371,15 +400,15 @@ class SFNTWriter(object):
assert directory_end == len(directory)
checksums.append(calcChecksum(directory))
checksum = sum(checksums) & 0xffffffff
checksum = sum(checksums) & 0xFFFFFFFF
# BiboAfba!
checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
return checksumadjustment
def writeMasterChecksum(self, directory):
checksumadjustment = self._calcMasterChecksum(directory)
# write the checksum to the file
self.file.seek(self.tables['head'].offset + 8)
self.file.seek(self.tables["head"].offset + 8)
self.file.write(struct.pack(">L", checksumadjustment))
def reordersTables(self):
@ -454,7 +483,6 @@ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
class DirectoryEntry(object):
def __init__(self):
self.uncompressed = False # if True, always embed entry raw
@ -477,12 +505,12 @@ class DirectoryEntry(object):
file.seek(self.offset)
data = file.read(self.length)
assert len(data) == self.length
if hasattr(self.__class__, 'decodeData'):
if hasattr(self.__class__, "decodeData"):
data = self.decodeData(data)
return data
def saveData(self, file, data):
if hasattr(self.__class__, 'encodeData'):
if hasattr(self.__class__, "encodeData"):
data = self.encodeData(data)
self.length = len(data)
file.seek(self.offset)
@ -494,11 +522,13 @@ class DirectoryEntry(object):
def encodeData(self, data):
return data
class SFNTDirectoryEntry(DirectoryEntry):
format = sfntDirectoryEntryFormat
formatSize = sfntDirectoryEntrySize
class WOFFDirectoryEntry(DirectoryEntry):
format = woffDirectoryEntryFormat
@ -512,11 +542,12 @@ class WOFFDirectoryEntry(DirectoryEntry):
# defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
# compressing the metadata. For backward compatibility, we still
# use the class attribute if it was already set.
if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'):
if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"):
self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
def decodeData(self, rawData):
import zlib
if self.length == self.origLength:
data = rawData
else:
@ -538,9 +569,10 @@ class WOFFDirectoryEntry(DirectoryEntry):
self.length = len(rawData)
return rawData
class WOFFFlavorData():
Flavor = 'woff'
class WOFFFlavorData:
Flavor = "woff"
def __init__(self, reader=None):
self.majorVersion = None
@ -565,6 +597,7 @@ class WOFFFlavorData():
def _decompress(self, rawData):
import zlib
return zlib.decompress(rawData)
@ -588,9 +621,10 @@ def calcChecksum(data):
for i in range(0, len(data), blockSize):
block = data[i : i + blockSize]
longs = struct.unpack(">%dL" % (len(block) // 4), block)
value = (value + sum(longs)) & 0xffffffff
value = (value + sum(longs)) & 0xFFFFFFFF
return value
def readTTCHeader(file):
file.seek(0)
data = file.read(ttcHeaderSize)
@ -600,15 +634,20 @@ def readTTCHeader(file):
sstruct.unpack(ttcHeaderFormat, data, self)
if self.TTCTag != "ttcf":
raise TTLibError("Not a Font Collection")
assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version
self.offsetTable = struct.unpack(">%dL" % self.numFonts, file.read(self.numFonts * 4))
assert self.Version == 0x00010000 or self.Version == 0x00020000, (
"unrecognized TTC version 0x%08x" % self.Version
)
self.offsetTable = struct.unpack(
">%dL" % self.numFonts, file.read(self.numFonts * 4)
)
if self.Version == 0x00020000:
pass # ignoring version 2.0 signatures
return self
def writeTTCHeader(file, numFonts):
self = SimpleNamespace()
self.TTCTag = 'ttcf'
self.TTCTag = "ttcf"
self.Version = 0x00010000
self.numFonts = numFonts
file.seek(0)
@ -617,7 +656,9 @@ def writeTTCHeader(file, numFonts):
file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
return offset
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)

View File

@ -267,5 +267,5 @@ standardGlyphOrder = [
"cacute", # 254
"Ccaron", # 255
"ccaron", # 256
"dcroat" # 257
"dcroat", # 257
]

View File

@ -28,8 +28,8 @@ smallGlyphMetricsFormat = """
Advance: B
"""
class BitmapGlyphMetrics(object):
class BitmapGlyphMetrics(object):
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__)
writer.newline()
@ -47,13 +47,18 @@ class BitmapGlyphMetrics(object):
name, attrs, content = element
# Make sure this is a metric that is needed by GlyphMetrics.
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
vars(self)[name] = safeEval(attrs["value"])
else:
log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
log.warning(
"unknown name '%s' being ignored in %s.",
name,
self.__class__.__name__,
)
class BigGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = bigGlyphMetricsFormat
class SmallGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = smallGlyphMetricsFormat

View File

@ -6,14 +6,24 @@
from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from .E_B_D_T_ import (
BitmapGlyph,
BitmapPlusSmallMetricsMixin,
BitmapPlusBigMetricsMixin,
)
import struct
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
# Change the data locator table being referenced.
locatorName = 'CBLC'
locatorName = "CBLC"
# Modify the format class accessor for color bitmap use.
def getImageFormatClass(self, imageFormat):
@ -22,20 +32,22 @@ class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
except KeyError:
return cbdt_bitmap_classes[imageFormat]
# Helper method for removing export features not supported by color bitmaps.
# Write data in the parent class will default to raw if an option is unsupported.
def _removeUnsupportedForColor(dataFunctions):
dataFunctions = dict(dataFunctions)
del dataFunctions['row']
del dataFunctions["row"]
return dataFunctions
class ColorBitmapGlyph(BitmapGlyph):
fileExtension = '.png'
fileExtension = ".png"
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = SmallGlyphMetrics()
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
@ -53,8 +65,8 @@ class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = BigGlyphMetrics()
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
@ -72,8 +84,8 @@ class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_19(ColorBitmapGlyph):
class cbdt_bitmap_format_19(ColorBitmapGlyph):
def decompile(self):
(dataLen,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
@ -84,6 +96,7 @@ class cbdt_bitmap_format_19(ColorBitmapGlyph):
def compile(self, ttFont):
return struct.pack(">L", len(self.imageData)) + self.imageData
# Dict for CBDT extended formats.
cbdt_bitmap_classes = {
17: cbdt_bitmap_format_17,

View File

@ -4,6 +4,7 @@
from . import E_B_L_C_
class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
dependencies = ['CBDT']
dependencies = ["CBDT"]

View File

@ -4,7 +4,6 @@ from . import DefaultTable
class table_C_F_F_(DefaultTable.DefaultTable):
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.cff = cffLib.CFFFontSet()
@ -28,6 +27,7 @@ class table_C_F_F_(DefaultTable.DefaultTable):
def getGlyphOrder(self):
if self._gaveGlyphOrder:
from fontTools import ttLib
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
self._gaveGlyphOrder = True
return self.cff[self.cff.fontNames[0]].getGlyphOrder()

View File

@ -3,7 +3,6 @@ from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
class table_C_F_F__2(table_C_F_F_):
def decompile(self, data, otFont):
self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

View File

@ -26,13 +26,11 @@ class table_C_O_L_R_(DefaultTable.DefaultTable):
baseGlyph = baseRec.BaseGlyph
firstLayerIndex = baseRec.FirstLayerIndex
numLayers = baseRec.NumLayers
assert (firstLayerIndex + numLayers <= numLayerRecords)
assert firstLayerIndex + numLayers <= numLayerRecords
layers = []
for i in range(firstLayerIndex, firstLayerIndex + numLayers):
layerRec = layerRecords[i]
layers.append(
LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)
)
layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
colorLayerLists[baseGlyph] = layers
return colorLayerLists
@ -142,8 +140,8 @@ class table_C_O_L_R_(DefaultTable.DefaultTable):
def __delitem__(self, glyphName):
del self.ColorLayers[glyphName]
class LayerRecord(object):
class LayerRecord(object):
def __init__(self, name=None, colorID=None):
self.name = name
self.colorID = colorID

View File

@ -23,13 +23,21 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
self.paletteEntryLabels = []
def decompile(self, data, ttFont):
self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12])
assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle"
(
self.version,
self.numPaletteEntries,
numPalettes,
numColorRecords,
goffsetFirstColorRecord,
) = struct.unpack(">HHHHL", data[:12])
assert (
self.version <= 1
), "Version of CPAL table is higher than I know how to handle"
self.palettes = []
pos = 12
for i in range(numPalettes):
startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
assert (startIndex + self.numPaletteEntries <= numColorRecords)
assert startIndex + self.numPaletteEntries <= numColorRecords
pos += 2
palette = []
ppos = goffsetFirstColorRecord + startIndex * 4
@ -43,23 +51,33 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
offsetToPaletteEntryLabelArray = 0
else:
pos = 12 + numPalettes * 2
(offsetToPaletteTypeArray, offsetToPaletteLabelArray,
offsetToPaletteEntryLabelArray) = (
struct.unpack(">LLL", data[pos:pos+12]))
(
offsetToPaletteTypeArray,
offsetToPaletteLabelArray,
offsetToPaletteEntryLabelArray,
) = struct.unpack(">LLL", data[pos : pos + 12])
self.paletteTypes = self._decompileUInt32Array(
data, offsetToPaletteTypeArray, numPalettes,
default=self.DEFAULT_PALETTE_TYPE)
data,
offsetToPaletteTypeArray,
numPalettes,
default=self.DEFAULT_PALETTE_TYPE,
)
self.paletteLabels = self._decompileUInt16Array(
data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID)
data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
)
self.paletteEntryLabels = self._decompileUInt16Array(
data, offsetToPaletteEntryLabelArray,
self.numPaletteEntries, default=self.NO_NAME_ID)
data,
offsetToPaletteEntryLabelArray,
self.numPaletteEntries,
default=self.NO_NAME_ID,
)
def _decompileUInt16Array(self, data, offset, numElements, default=0):
if offset == 0:
return [default] * numElements
result = array.array("H", data[offset : offset + 2 * numElements])
if sys.byteorder != "big": result.byteswap()
if sys.byteorder != "big":
result.byteswap()
assert len(result) == numElements, result
return result.tolist()
@ -67,7 +85,8 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
if offset == 0:
return [default] * numElements
result = array.array("I", data[offset : offset + 4 * numElements])
if sys.byteorder != "big": result.byteswap()
if sys.byteorder != "big":
result.byteswap()
assert len(result) == numElements, result
return result.tolist()
@ -80,9 +99,14 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
offsetToFirstColorRecord = 12 + len(colorRecordIndices)
if self.version >= 1:
offsetToFirstColorRecord += 12
header = struct.pack(">HHHHL", self.version,
self.numPaletteEntries, len(self.palettes),
numColorRecords, offsetToFirstColorRecord)
header = struct.pack(
">HHHHL",
self.version,
self.numPaletteEntries,
len(self.palettes),
numColorRecords,
offsetToFirstColorRecord,
)
if self.version == 0:
dataList = [header, colorRecordIndices, colorRecords]
else:
@ -102,17 +126,25 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
else:
offsetToPaletteEntryLabelArray = pos
pos += len(paletteLabels)
header1 = struct.pack(">LLL",
header1 = struct.pack(
">LLL",
offsetToPaletteTypeArray,
offsetToPaletteLabelArray,
offsetToPaletteEntryLabelArray)
dataList = [header, colorRecordIndices, header1,
colorRecords, paletteTypes, paletteLabels,
paletteEntryLabels]
offsetToPaletteEntryLabelArray,
)
dataList = [
header,
colorRecordIndices,
header1,
colorRecords,
paletteTypes,
paletteLabels,
paletteEntryLabels,
]
return bytesjoin(dataList)
def _compilePalette(self, palette):
assert(len(palette) == self.numPaletteEntries)
assert len(palette) == self.numPaletteEntries
pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
return bytesjoin([pack(color) for color in palette])
@ -131,40 +163,39 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
def _compilePaletteTypes(self):
if self.version == 0 or not any(self.paletteTypes):
return b''
return b""
assert len(self.paletteTypes) == len(self.palettes)
result = bytesjoin([struct.pack(">I", ptype)
for ptype in self.paletteTypes])
result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
assert len(result) == 4 * len(self.palettes)
return result
def _compilePaletteLabels(self):
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
return b''
return b""
assert len(self.paletteLabels) == len(self.palettes)
result = bytesjoin([struct.pack(">H", label)
for label in self.paletteLabels])
result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
assert len(result) == 2 * len(self.palettes)
return result
def _compilePaletteEntryLabels(self):
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
return b''
if self.version == 0 or all(
l == self.NO_NAME_ID for l in self.paletteEntryLabels
):
return b""
assert len(self.paletteEntryLabels) == self.numPaletteEntries
result = bytesjoin([struct.pack(">H", label)
for label in self.paletteEntryLabels])
result = bytesjoin(
[struct.pack(">H", label) for label in self.paletteEntryLabels]
)
assert len(result) == 2 * self.numPaletteEntries
return result
def toXML(self, writer, ttFont):
numPalettes = len(self.palettes)
paletteLabels = {i: nameID
for (i, nameID) in enumerate(self.paletteLabels)}
paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
writer.simpletag("version", value=self.version)
writer.newline()
writer.simpletag("numPaletteEntries",
value=self.numPaletteEntries)
writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
writer.newline()
for index, palette in enumerate(self.palettes):
attrs = {"index": index}
@ -176,24 +207,30 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
attrs["type"] = paletteType
writer.begintag("palette", **attrs)
writer.newline()
if (self.version > 0 and paletteLabel != self.NO_NAME_ID and
ttFont and "name" in ttFont):
if (
self.version > 0
and paletteLabel != self.NO_NAME_ID
and ttFont
and "name" in ttFont
):
name = ttFont["name"].getDebugName(paletteLabel)
if name is not None:
writer.comment(name)
writer.newline()
assert(len(palette) == self.numPaletteEntries)
assert len(palette) == self.numPaletteEntries
for cindex, color in enumerate(palette):
color.toXML(writer, ttFont, cindex)
writer.endtag("palette")
writer.newline()
if self.version > 0 and not all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
if self.version > 0 and not all(
l == self.NO_NAME_ID for l in self.paletteEntryLabels
):
writer.begintag("paletteEntryLabels")
writer.newline()
for index, label in enumerate(self.paletteEntryLabels):
if label != self.NO_NAME_ID:
writer.simpletag("label", index=index, value=label)
if (self.version > 0 and label and ttFont and "name" in ttFont):
if self.version > 0 and label and ttFont and "name" in ttFont:
name = ttFont["name"].getDebugName(label)
if name is not None:
writer.comment(name)
@ -225,7 +262,8 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
colorLabels[labelIndex] = nameID
self.paletteEntryLabels = [
colorLabels.get(i, self.NO_NAME_ID)
for i in range(self.numPaletteEntries)]
for i in range(self.numPaletteEntries)
]
elif "value" in attrs:
value = safeEval(attrs["value"])
setattr(self, name, value)
@ -234,7 +272,6 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
class Color(namedtuple("Color", "blue green red alpha")):
def hex(self):
return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
@ -247,7 +284,7 @@ class Color(namedtuple("Color", "blue green red alpha")):
@classmethod
def fromHex(cls, value):
if value[0] == '#':
if value[0] == "#":
value = value[1:]
red = int(value[0:2], 16)
green = int(value[2:4], 16)
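
The hunk above is cut off by the diff viewer before fromHex finishes; purely as an illustrative sketch (the blue and alpha lines are an assumption, not part of this commit), the "#RRGGBBAA" parsing would continue along these lines:

value = "#FF8020C0"  # hypothetical input
if value[0] == "#":
    value = value[1:]
red = int(value[0:2], 16)
green = int(value[2:4], 16)
blue = int(value[4:6], 16)   # assumed continuation
alpha = int(value[6:8], 16)  # assumed continuation
assert (red, green, blue, alpha) == (0xFF, 0x80, 0x20, 0xC0)
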

View File

@ -37,21 +37,31 @@ DSIG_SignatureBlockFormat = """
# on compilation with no padding whatsoever.
#
class table_D_S_I_G_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
assert self.ulVersion == 1, "DSIG ulVersion must be 1"
assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
self.signatureRecords = sigrecs = []
for n in range(self.usNumSigs):
sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
sigrec, newData = sstruct.unpack2(
DSIG_SignatureFormat, newData, SignatureRecord()
)
assert sigrec.ulFormat == 1, (
"DSIG signature record #%d ulFormat must be 1" % n
)
sigrecs.append(sigrec)
for sigrec in sigrecs:
dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
dummy, newData = sstruct.unpack2(
DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
)
assert sigrec.usReserved1 == 0, (
"DSIG signature record #%d usReserverd1 must be 0" % n
)
assert sigrec.usReserved2 == 0, (
"DSIG signature record #%d usReserverd2 must be 0" % n
)
sigrec.pkcs7 = newData[: sigrec.cbSignature]
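
A usage sketch, assuming a font file that actually carries a DSIG table ("signed.ttf" is a placeholder name): decompile exposes one SignatureRecord per signature, with the raw PKCS#7 blob available in .pkcs7.

from fontTools.ttLib import TTFont

font = TTFont("signed.ttf")  # hypothetical signed font
if "DSIG" in font:
    for sigrec in font["DSIG"].signatureRecords:
        print(sigrec.ulFormat, len(sigrec.pkcs7), "bytes of PKCS#7 data")
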
def compile(self, ttFont):
@ -72,13 +82,20 @@ class table_D_S_I_G_(DefaultTable.DefaultTable):
offset += sigrec.ulLength
if offset % 2:
# Pad to even bytes
data.append(b'\0')
data.append(b"\0")
return bytesjoin(headers + data)
def toXML(self, xmlWriter, ttFont):
xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
xmlWriter.comment(
"note that the Digital Signature will be invalid after recompilation!"
)
xmlWriter.newline()
xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
xmlWriter.simpletag(
"tableHeader",
version=self.ulVersion,
numSigs=self.usNumSigs,
flag="0x%X" % self.usFlag,
)
for sigrec in self.signatureRecords:
xmlWriter.newline()
sigrec.toXML(xmlWriter, ttFont)
@ -96,20 +113,25 @@ class table_D_S_I_G_(DefaultTable.DefaultTable):
sigrec.fromXML(name, attrs, content, ttFont)
self.signatureRecords.append(sigrec)
pem_spam = lambda l, spam={
"-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
"-----BEGIN PKCS7-----": True,
"-----END PKCS7-----": True,
"": True,
}: not spam.get(l.strip())
def b64encode(b):
s = base64.b64encode(b)
# Line-break at 76 chars.
items = []
while s:
items.append(tostr(s[:76]))
items.append('\n')
items.append("\n")
s = s[76:]
return strjoin(items)
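
A small self-check, not part of the commit, of the 76-character wrapping done by b64encode above (the import path assumes the module location fontTools/ttLib/tables/D_S_I_G_.py):

from fontTools.ttLib.tables.D_S_I_G_ import b64encode

payload = bytes(range(256))
wrapped = b64encode(payload)
assert all(len(line) <= 76 for line in wrapped.splitlines())
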
class SignatureRecord(object):
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.__dict__)

Some files were not shown because too many files have changed in this diff.