Blacken code
parent 698d8fb387
commit d584daa8fd
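The hunks below are mechanical rewrites produced by the Black code formatter: string literals normalized to double quotes, spaces added around operators, long calls and literals exploded one argument per line with trailing commas, one-line `if x: y` statements split, and two blank lines enforced between top-level definitions. As a rough illustration of the kind of rewrite shown in this diff — not the project's actual invocation, whose Black version and configuration are not recorded here — Black can be driven from its Python API:

    # Illustrative sketch only: assumes the "black" package is installed; the
    # exact Black version/options used for this commit are not shown in the diff.
    import black

    old_src = "mod = 'fontTools.'+sys.argv[1]\n"
    new_src = black.format_str(old_src, mode=black.Mode())
    print(new_src)  # -> mod = "fontTools." + sys.argv[1]

On the command line the same reformatting is typically applied by running `black` over the source directories.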
@@ -30,14 +30,17 @@ needs_sphinx = "1.3"
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage", "sphinx.ext.autosectionlabel"]
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.coverage",
+    "sphinx.ext.autosectionlabel",
+]
 
 autodoc_mock_imports = ["gtk", "reportlab"]
 
-autodoc_default_options = {
-    'members': True,
-    'inherited-members': True
-}
+autodoc_default_options = {"members": True, "inherited-members": True}
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates"]
@@ -52,9 +55,11 @@ source_suffix = ".rst"
 master_doc = "index"
 
 # General information about the project.
-project = u"fontTools"
-copyright = u"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
-author = u"Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
+project = "fontTools"
+copyright = (
+    "2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
+)
+author = "Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
 
 # HTML page title
 html_title = "fontTools Documentation"
@@ -64,9 +69,9 @@ html_title = "fontTools Documentation"
 # built documents.
 #
 # The short X.Y version.
-version = u"4.0"
+version = "4.0"
 # The full version, including alpha/beta/rc tags.
-release = u"4.0"
+release = "4.0"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -142,8 +147,8 @@ latex_documents = [
     (
         master_doc,
         "fontTools.tex",
-        u"fontTools Documentation",
-        u"Just van Rossum, Behdad Esfahbod et al.",
+        "fontTools Documentation",
+        "Just van Rossum, Behdad Esfahbod et al.",
         "manual",
     )
 ]
@@ -153,7 +158,7 @@ latex_documents = [
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "fonttools", u"fontTools Documentation", [author], 1)]
+man_pages = [(master_doc, "fonttools", "fontTools Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output -------------------------------------------
@@ -165,7 +170,7 @@ texinfo_documents = [
     (
         master_doc,
         "fontTools",
-        u"fontTools Documentation",
+        "fontTools Documentation",
         author,
         "fontTools",
         "A library for manipulating fonts, written in Python.",
@@ -22,13 +22,14 @@ def main(args=None):
         sys.argv.append("help")
     if sys.argv[1] == "-h" or sys.argv[1] == "--help":
         sys.argv[1] = "help"
-    mod = 'fontTools.'+sys.argv[1]
-    sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1]
+    mod = "fontTools." + sys.argv[1]
+    sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
     del sys.argv[0]
 
     import runpy
-    runpy.run_module(mod, run_name='__main__')
+
+    runpy.run_module(mod, run_name="__main__")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
@@ -82,10 +82,7 @@ kernRE = re.compile(
 # regular expressions to parse composite info lines of the form:
 # Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
 compositeRE = re.compile(
-    r"([.A-Za-z0-9_]+)"  # char name
-    r"\s+"
-    r"(\d+)"  # number of parts
-    r"\s*;\s*"
+    r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*"  # char name  # number of parts
 )
 componentRE = re.compile(
     r"PCC\s+"  # PPC
@@ -125,16 +122,17 @@ class AFM(object):
 
     _attrs = None
 
-    _keywords = ['StartFontMetrics',
-        'EndFontMetrics',
-        'StartCharMetrics',
-        'EndCharMetrics',
-        'StartKernData',
-        'StartKernPairs',
-        'EndKernPairs',
-        'EndKernData',
-        'StartComposites',
-        'EndComposites',
+    _keywords = [
+        "StartFontMetrics",
+        "EndFontMetrics",
+        "StartCharMetrics",
+        "EndCharMetrics",
+        "StartKernData",
+        "StartKernPairs",
+        "EndKernPairs",
+        "EndKernData",
+        "StartComposites",
+        "EndComposites",
     ]
 
     def __init__(self, path=None):
@@ -235,13 +233,15 @@ class AFM(object):
         assert len(components) == ncomponents
         self._composites[charname] = components
 
-    def write(self, path, sep='\r'):
+    def write(self, path, sep="\r"):
         """Writes out an AFM font to the given path."""
         import time
-        lines = [ "StartFontMetrics 2.0",
-            "Comment Generated by afmLib; at %s" % (
-            time.strftime("%m/%d/%Y %H:%M:%S",
-            time.localtime(time.time())))]
+
+        lines = [
+            "StartFontMetrics 2.0",
+            "Comment Generated by afmLib; at %s"
+            % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
+        ]
 
         # write comments, assuming (possibly wrongly!) they should
         # all appear at the top
@@ -267,19 +267,25 @@ class AFM(object):
 
         # write char metrics
         lines.append("StartCharMetrics " + repr(len(self._chars)))
-        items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()]
+        items = [
+            (charnum, (charname, width, box))
+            for charname, (charnum, width, box) in self._chars.items()
+        ]
 
         def myKey(a):
             """Custom key function to make sure unencoded chars (-1)
             end up at the end of the list after sorting."""
             if a[0] == -1:
-                a = (0xffff,) + a[1:]  # 0xffff is an arbitrary large number
+                a = (0xFFFF,) + a[1:]  # 0xffff is an arbitrary large number
             return a
 
         items.sort(key=myKey)
 
         for charnum, (charname, width, (l, b, r, t)) in items:
-            lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
-                (charnum, width, charname, l, b, r, t))
+            lines.append(
+                "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
+                % (charnum, width, charname, l, b, r, t)
+            )
         lines.append("EndCharMetrics")
 
         # write kerning info
@@ -394,9 +400,9 @@ class AFM(object):
 
     def __repr__(self):
         if hasattr(self, "FullName"):
-            return '<AFM object for %s>' % self.FullName
+            return "<AFM object for %s>" % self.FullName
         else:
-            return '<AFM object at %x>' % id(self)
+            return "<AFM object at %x>" % id(self)
 
 
 def readlines(path):
@@ -404,20 +410,22 @@ def readlines(path):
         data = f.read()
     return data.splitlines()
 
-def writelines(path, lines, sep='\r'):
+
+def writelines(path, lines, sep="\r"):
     with open(path, "w", encoding="ascii", newline=sep) as f:
         f.write("\n".join(lines) + "\n")
 
 
 if __name__ == "__main__":
     import EasyDialogs
+
     path = EasyDialogs.AskFileForOpen()
     if path:
         afm = AFM(path)
-        char = 'A'
+        char = "A"
        if afm.has_char(char):
             print(afm[char])  # print charnum, width and boundingbox
-        pair = ('A', 'V')
+        pair = ("A", "V")
         if afm.has_kernpair(pair):
             print(afm[pair])  # print kerning value for pair
         print(afm.Version)  # various other afm entries have become attributes
@@ -5061,10 +5061,12 @@ _aglfnText = """\
 class AGLError(Exception):
     pass
 
+
 LEGACY_AGL2UV = {}
 AGL2UV = {}
 UV2AGL = {}
 
+
 def _builddicts():
     import re
 
@@ -5073,7 +5075,7 @@ def _builddicts():
     parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
 
     for line in lines:
-        if not line or line[:1] == '#':
+        if not line or line[:1] == "#":
             continue
         m = parseAGL_RE.match(line)
         if not m:
@@ -5089,7 +5091,7 @@ def _builddicts():
     parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
 
     for line in lines:
-        if not line or line[:1] == '#':
+        if not line or line[:1] == "#":
             continue
         m = parseAGLFN_RE.match(line)
         if not m:
@@ -5101,6 +5103,7 @@ def _builddicts():
         AGL2UV[glyphName] = unicode
         UV2AGL[unicode] = glyphName
 
+
 _builddicts()
 
 
@@ -5123,8 +5126,7 @@ def toUnicode(glyph, isZapfDingbats=False):
     # 3. Map each component to a character string according to the
     # procedure below, and concatenate those strings; the result
     # is the character string to which the glyph name is mapped.
-    result = [_glyphComponentToUnicode(c, isZapfDingbats)
-        for c in components]
+    result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
     return "".join(result)
 
 
@@ -5169,7 +5171,7 @@ def _glyphComponentToUnicode(component, isZapfDingbats):
         return uni
 
     # Otherwise, map the component to an empty string.
-    return ''
+    return ""
 
 
 # https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
@@ -5177,12 +5179,13 @@ _AGL_ZAPF_DINGBATS = (
     " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
     "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
     "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
-    "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰")
+    "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
+)
 
 
 def _zapfDingbatsToUnicode(glyph):
     """Helper for toUnicode()."""
-    if len(glyph) < 2 or glyph[0] != 'a':
+    if len(glyph) < 2 or glyph[0] != "a":
         return None
     try:
         gid = int(glyph[1:])
@@ -5191,7 +5194,7 @@ def _zapfDingbatsToUnicode(glyph):
     if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
         return None
     uchar = _AGL_ZAPF_DINGBATS[gid]
-    return uchar if uchar != ' ' else None
+    return uchar if uchar != " " else None
 
 
 _re_uni = re.compile("^uni([0-9A-F]+)$")
@@ -5205,12 +5208,11 @@ def _uniToUnicode(component):
     digits = match.group(1)
     if len(digits) % 4 != 0:
         return None
-    chars = [int(digits[i : i + 4], 16)
-        for i in range(0, len(digits), 4)]
+    chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
     if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
         # The AGL specification explicitly excluded surrogate pairs.
         return None
-    return ''.join([chr(c) for c in chars])
+    return "".join([chr(c) for c in chars])
 
 
 _re_u = re.compile("^u([0-9A-F]{4,6})$")
@@ -5226,7 +5228,6 @@ def _uToUnicode(component):
         value = int(digits, 16)
     except ValueError:
         return None
-    if ((value >= 0x0000 and value <= 0xD7FF) or
-        (value >= 0xE000 and value <= 0x10FFFF)):
+    if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
         return chr(value)
     return None
File diff suppressed because it is too large
@@ -33,7 +33,7 @@ def stringToProgram(string):
 
 
 def programToString(program):
-    return ' '.join(str(x) for x in program)
+    return " ".join(str(x) for x in program)
 
 
 def programToCommands(program, getNumRegions=None):
@@ -73,7 +73,7 @@ def programToCommands(program, getNumRegions=None):
             stack.append(token)
             continue
 
-        if token == 'blend':
+        if token == "blend":
             assert getNumRegions is not None
             numSourceFonts = 1 + getNumRegions(vsIndex)
             # replace the blend op args on the stack with a single list
@@ -87,16 +87,24 @@ def programToCommands(program, getNumRegions=None):
             # if a blend op exists, this is or will be a CFF2 charstring.
             continue
 
-        elif token == 'vsindex':
+        elif token == "vsindex":
             vsIndex = stack[-1]
             assert type(vsIndex) is int
 
-        elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm',
-            'cntrmask', 'hintmask',
-            'hmoveto', 'vmoveto', 'rmoveto',
-            'endchar'}:
+        elif (not seenWidthOp) and token in {
+            "hstem",
+            "hstemhm",
+            "vstem",
+            "vstemhm",
+            "cntrmask",
+            "hintmask",
+            "hmoveto",
+            "vmoveto",
+            "rmoveto",
+            "endchar",
+        }:
             seenWidthOp = True
-            parity = token in {'hmoveto', 'vmoveto'}
+            parity = token in {"hmoveto", "vmoveto"}
             if lenBlendStack:
                 # lenBlendStack has the number of args represented by the last blend
                 # arg and all the preceding args. We need to now add the number of
@@ -106,18 +114,18 @@ def programToCommands(program, getNumRegions=None):
             numArgs = len(stack)
             if numArgs and (numArgs % 2) ^ parity:
                 width = stack.pop(0)
-                commands.append(('', [width]))
+                commands.append(("", [width]))
 
-        if token in {'hintmask', 'cntrmask'}:
+        if token in {"hintmask", "cntrmask"}:
             if stack:
-                commands.append(('', stack))
+                commands.append(("", stack))
             commands.append((token, []))
-            commands.append(('', [next(it)]))
+            commands.append(("", [next(it)]))
         else:
             commands.append((token, stack))
         stack = []
     if stack:
-        commands.append(('', stack))
+        commands.append(("", stack))
     return commands
 
 
@@ -126,11 +134,12 @@ def _flattenBlendArgs(args):
     for arg in args:
         if isinstance(arg, list):
             token_list.extend(arg)
-            token_list.append('blend')
+            token_list.append("blend")
         else:
             token_list.append(arg)
     return token_list
 
+
 def commandsToProgram(commands):
     """Takes a commands list as returned by programToCommands() and converts
     it back to a T2CharString program list."""
@@ -146,75 +155,93 @@ def commandsToProgram(commands):
 
 def _everyN(el, n):
     """Group the list el into groups of size n"""
-    if len(el) % n != 0: raise ValueError(el)
+    if len(el) % n != 0:
+        raise ValueError(el)
     for i in range(0, len(el), n):
         yield el[i : i + n]
 
 
 class _GeneralizerDecombinerCommandsMap(object):
 
     @staticmethod
     def rmoveto(args):
-        if len(args) != 2: raise ValueError(args)
-        yield ('rmoveto', args)
+        if len(args) != 2:
+            raise ValueError(args)
+        yield ("rmoveto", args)
 
     @staticmethod
     def hmoveto(args):
-        if len(args) != 1: raise ValueError(args)
-        yield ('rmoveto', [args[0], 0])
+        if len(args) != 1:
+            raise ValueError(args)
+        yield ("rmoveto", [args[0], 0])
 
     @staticmethod
     def vmoveto(args):
-        if len(args) != 1: raise ValueError(args)
-        yield ('rmoveto', [0, args[0]])
+        if len(args) != 1:
+            raise ValueError(args)
+        yield ("rmoveto", [0, args[0]])
 
     @staticmethod
     def rlineto(args):
-        if not args: raise ValueError(args)
+        if not args:
+            raise ValueError(args)
         for args in _everyN(args, 2):
-            yield ('rlineto', args)
+            yield ("rlineto", args)
 
     @staticmethod
     def hlineto(args):
-        if not args: raise ValueError(args)
+        if not args:
+            raise ValueError(args)
         it = iter(args)
         try:
             while True:
-                yield ('rlineto', [next(it), 0])
-                yield ('rlineto', [0, next(it)])
+                yield ("rlineto", [next(it), 0])
+                yield ("rlineto", [0, next(it)])
         except StopIteration:
             pass
 
     @staticmethod
     def vlineto(args):
-        if not args: raise ValueError(args)
+        if not args:
+            raise ValueError(args)
         it = iter(args)
         try:
             while True:
-                yield ('rlineto', [0, next(it)])
-                yield ('rlineto', [next(it), 0])
+                yield ("rlineto", [0, next(it)])
+                yield ("rlineto", [next(it), 0])
         except StopIteration:
             pass
 
     @staticmethod
     def rrcurveto(args):
-        if not args: raise ValueError(args)
+        if not args:
+            raise ValueError(args)
         for args in _everyN(args, 6):
-            yield ('rrcurveto', args)
+            yield ("rrcurveto", args)
 
     @staticmethod
     def hhcurveto(args):
-        if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
+        if len(args) < 4 or len(args) % 4 > 1:
+            raise ValueError(args)
         if len(args) % 2 == 1:
-            yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0])
+            yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
             args = args[5:]
         for args in _everyN(args, 4):
-            yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0])
+            yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
 
     @staticmethod
     def vvcurveto(args):
-        if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args)
+        if len(args) < 4 or len(args) % 4 > 1:
+            raise ValueError(args)
         if len(args) % 2 == 1:
-            yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]])
+            yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
             args = args[5:]
         for args in _everyN(args, 4):
-            yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]])
+            yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
 
     @staticmethod
     def hvcurveto(args):
-        if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
+        if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
+            raise ValueError(args)
         last_args = None
         if len(args) % 2 == 1:
             lastStraight = len(args) % 8 == 5
@@ -223,20 +250,22 @@ class _GeneralizerDecombinerCommandsMap(object):
         try:
             while True:
                 args = next(it)
-                yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
+                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
                 args = next(it)
-                yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
+                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
         except StopIteration:
             pass
         if last_args:
             args = last_args
             if lastStraight:
-                yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
+                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
             else:
-                yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
+                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
 
     @staticmethod
     def vhcurveto(args):
-        if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args)
+        if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
+            raise ValueError(args)
         last_args = None
         if len(args) % 2 == 1:
             lastStraight = len(args) % 8 == 5
@@ -245,32 +274,36 @@ class _GeneralizerDecombinerCommandsMap(object):
         try:
             while True:
                 args = next(it)
-                yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0])
+                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
                 args = next(it)
-                yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]])
+                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
         except StopIteration:
             pass
         if last_args:
             args = last_args
             if lastStraight:
-                yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]])
+                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
             else:
-                yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]])
+                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
 
     @staticmethod
     def rcurveline(args):
-        if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args)
+        if len(args) < 8 or len(args) % 6 != 2:
+            raise ValueError(args)
         args, last_args = args[:-2], args[-2:]
         for args in _everyN(args, 6):
-            yield ('rrcurveto', args)
-        yield ('rlineto', last_args)
+            yield ("rrcurveto", args)
+        yield ("rlineto", last_args)
 
     @staticmethod
     def rlinecurve(args):
-        if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args)
+        if len(args) < 8 or len(args) % 2 != 0:
+            raise ValueError(args)
         args, last_args = args[:-6], args[-6:]
         for args in _everyN(args, 2):
-            yield ('rlineto', args)
-        yield ('rrcurveto', last_args)
+            yield ("rlineto", args)
+        yield ("rrcurveto", last_args)
 
 
 def _convertBlendOpToArgs(blendList):
     # args is list of blend op args. Since we are supporting
@@ -278,8 +311,11 @@ def _convertBlendOpToArgs(blendList):
     # be a list of blend op args, and need to be converted before
     # we convert the current list.
     if any([isinstance(arg, list) for arg in blendList]):
-        args = [i for e in blendList for i in
-                (_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ]
+        args = [
+            i
+            for e in blendList
+            for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
+        ]
     else:
         args = blendList
 
@@ -303,10 +339,13 @@ def _convertBlendOpToArgs(blendList):
     defaultArgs = [[arg] for arg in args[:numBlends]]
     deltaArgs = args[numBlends:]
     numDeltaValues = len(deltaArgs)
-    deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ]
+    deltaList = [
+        deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
+    ]
     blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
     return blend_args
 
 
 def generalizeCommands(commands, ignoreErrors=False):
     result = []
     mapping = _GeneralizerDecombinerCommandsMap
@@ -314,13 +353,19 @@ def generalizeCommands(commands, ignoreErrors=False):
         # First, generalize any blend args in the arg list.
         if any([isinstance(arg, list) for arg in args]):
             try:
-                args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])]
+                args = [
+                    n
+                    for arg in args
+                    for n in (
+                        _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
+                    )
+                ]
             except ValueError:
                 if ignoreErrors:
                     # Store op as data, such that consumers of commands do not have to
                     # deal with incorrect number of arguments.
-                    result.append(('', args))
-                    result.append(('', [op]))
+                    result.append(("", args))
+                    result.append(("", [op]))
                 else:
                     raise
 
@@ -335,14 +380,17 @@ def generalizeCommands(commands, ignoreErrors=False):
             if ignoreErrors:
                 # Store op as data, such that consumers of commands do not have to
                 # deal with incorrect number of arguments.
-                result.append(('', args))
-                result.append(('', [op]))
+                result.append(("", args))
+                result.append(("", [op]))
             else:
                 raise
     return result
 
 
 def generalizeProgram(program, getNumRegions=None, **kwargs):
-    return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs))
+    return commandsToProgram(
+        generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
+    )
 
 
 def _categorizeVector(v):
@@ -362,27 +410,35 @@ def _categorizeVector(v):
     """
     if not v[0]:
         if not v[1]:
-            return '0', v[:1]
+            return "0", v[:1]
         else:
-            return 'v', v[1:]
+            return "v", v[1:]
     else:
         if not v[1]:
-            return 'h', v[:1]
+            return "h", v[:1]
         else:
-            return 'r', v
+            return "r", v
 
 
 def _mergeCategories(a, b):
-    if a == '0': return b
-    if b == '0': return a
-    if a == b: return a
+    if a == "0":
+        return b
+    if b == "0":
+        return a
+    if a == b:
+        return a
     return None
 
 
 def _negateCategory(a):
-    if a == 'h': return 'v'
-    if a == 'v': return 'h'
-    assert a in '0r'
+    if a == "h":
+        return "v"
+    if a == "v":
+        return "h"
+    assert a in "0r"
     return a
 
 
 def _convertToBlendCmds(args):
     # return a list of blend commands, and
     # the remaining non-blended args, if any.
@@ -435,6 +491,7 @@ def _convertToBlendCmds(args):
 
     return new_args
 
+
 def _addArgs(a, b):
     if isinstance(b, list):
         if isinstance(a, list):
@@ -449,11 +506,13 @@ def _addArgs(a, b):
     return a + b
 
 
-def specializeCommands(commands,
+def specializeCommands(
+    commands,
     ignoreErrors=False,
     generalizeFirst=True,
     preserveTopology=False,
-    maxstack=48):
+    maxstack=48,
+):
 
     # We perform several rounds of optimizations. They are carefully ordered and are:
     #
@@ -487,7 +546,6 @@ def specializeCommands(commands,
     #
     # 7. For any args which are blend lists, convert them to a blend command.
 
-
     # 0. Generalize commands.
     if generalizeFirst:
         commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
@@ -496,9 +554,9 @@ def specializeCommands(commands,
 
     # 1. Combine successive rmoveto operations.
     for i in range(len(commands) - 1, 0, -1):
-        if 'rmoveto' == commands[i][0] == commands[i-1][0]:
+        if "rmoveto" == commands[i][0] == commands[i - 1][0]:
             v1, v2 = commands[i - 1][1], commands[i][1]
-            commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]])
+            commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
             del commands[i]
 
     # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
@@ -550,15 +608,15 @@ def specializeCommands(commands,
     for i in range(len(commands)):
         op, args = commands[i]
 
-        if op in {'rmoveto', 'rlineto'}:
+        if op in {"rmoveto", "rlineto"}:
             c, args = _categorizeVector(args)
             commands[i] = c + op[1:], args
             continue
 
-        if op == 'rrcurveto':
+        if op == "rrcurveto":
             c1, args1 = _categorizeVector(args[:2])
             c2, args2 = _categorizeVector(args[-2:])
-            commands[i] = c1+c2+'curveto', args1+args[2:4]+args2
+            commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
             continue
 
     # 3. Merge or delete redundant operations, to the extent requested.
@@ -581,22 +639,21 @@ def specializeCommands(commands,
     # For Type2 CharStrings the sequence is:
     # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
 
-
     # Some other redundancies change topology (point numbers).
     if not preserveTopology:
         for i in range(len(commands) - 1, -1, -1):
             op, args = commands[i]
 
             # A 00curveto is demoted to a (specialized) lineto.
-            if op == '00curveto':
+            if op == "00curveto":
                 assert len(args) == 4
                 c, args = _categorizeVector(args[1:3])
-                op = c+'lineto'
+                op = c + "lineto"
                 commands[i] = op, args
                 # and then...
 
             # A 0lineto can be deleted.
-            if op == '0lineto':
+            if op == "0lineto":
                 del commands[i]
                 continue
 
@@ -604,8 +661,7 @@ def specializeCommands(commands,
             # In CFF2 charstrings from variable fonts, each
             # arg item may be a list of blendable values, one from
            # each source font.
-            if (i and op in {'hlineto', 'vlineto'} and
-                (op == commands[i-1][0])):
+            if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
                 _, other_args = commands[i - 1]
                 assert len(args) == 1 and len(other_args) == 1
                 try:
@@ -622,25 +678,25 @@ def specializeCommands(commands,
         op, args = commands[i]
         prv, nxt = commands[i - 1][0], commands[i + 1][0]
 
-        if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto':
+        if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
             assert len(args) == 1
-            args = [0, args[0]] if op[0] == 'v' else [args[0], 0]
-            commands[i] = ('rlineto', args)
+            args = [0, args[0]] if op[0] == "v" else [args[0], 0]
+            commands[i] = ("rlineto", args)
             continue
 
-        if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto':
-            assert (op[0] == 'r') ^ (op[1] == 'r')
-            if op[0] == 'v':
+        if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
+            assert (op[0] == "r") ^ (op[1] == "r")
+            if op[0] == "v":
                 pos = 0
-            elif op[0] != 'r':
+            elif op[0] != "r":
                 pos = 1
-            elif op[1] == 'v':
+            elif op[1] == "v":
                 pos = 4
             else:
                 pos = 5
             # Insert, while maintaining the type of args (can be tuple or list).
             args = args[:pos] + type(args)((0,)) + args[pos:]
-            commands[i] = ('rrcurveto', args)
+            commands[i] = ("rrcurveto", args)
             continue
 
     # 5. Combine adjacent operators when possible, minding not to go over max stack size.
@@ -650,42 +706,46 @@ def specializeCommands(commands,
         new_op = None
 
         # Merge logic...
-        if {op1, op2} <= {'rlineto', 'rrcurveto'}:
+        if {op1, op2} <= {"rlineto", "rrcurveto"}:
             if op1 == op2:
                 new_op = op1
             else:
-                if op2 == 'rrcurveto' and len(args2) == 6:
-                    new_op = 'rlinecurve'
+                if op2 == "rrcurveto" and len(args2) == 6:
+                    new_op = "rlinecurve"
                 elif len(args2) == 2:
-                    new_op = 'rcurveline'
+                    new_op = "rcurveline"
 
-        elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}:
+        elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
             new_op = op2
 
-        elif {op1, op2} == {'vlineto', 'hlineto'}:
+        elif {op1, op2} == {"vlineto", "hlineto"}:
             new_op = op1
 
-        elif 'curveto' == op1[2:] == op2[2:]:
+        elif "curveto" == op1[2:] == op2[2:]:
             d0, d1 = op1[:2]
             d2, d3 = op2[:2]
 
-            if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r':
+            if d1 == "r" or d2 == "r" or d0 == d3 == "r":
                 continue
 
             d = _mergeCategories(d1, d2)
-            if d is None: continue
-            if d0 == 'r':
+            if d is None:
+                continue
+            if d0 == "r":
                 d = _mergeCategories(d, d3)
-                if d is None: continue
-                new_op = 'r'+d+'curveto'
-            elif d3 == 'r':
+                if d is None:
+                    continue
+                new_op = "r" + d + "curveto"
+            elif d3 == "r":
                 d0 = _mergeCategories(d0, _negateCategory(d))
-                if d0 is None: continue
-                new_op = d0+'r'+'curveto'
+                if d0 is None:
+                    continue
+                new_op = d0 + "r" + "curveto"
             else:
                 d0 = _mergeCategories(d0, d3)
-                if d0 is None: continue
-                new_op = d0+d+'curveto'
+                if d0 is None:
+                    continue
+                new_op = d0 + d + "curveto"
 
         # Make sure the stack depth does not exceed (maxstack - 1), so
         # that subroutinizer can insert subroutine calls at any point.
@@ -697,31 +757,35 @@ def specializeCommands(commands,
     for i in range(len(commands)):
         op, args = commands[i]
 
-        if op in {'0moveto', '0lineto'}:
-            commands[i] = 'h'+op[1:], args
+        if op in {"0moveto", "0lineto"}:
+            commands[i] = "h" + op[1:], args
             continue
 
-        if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}:
+        if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
             op0, op1 = op[:2]
-            if (op0 == 'r') ^ (op1 == 'r'):
+            if (op0 == "r") ^ (op1 == "r"):
                 assert len(args) % 2 == 1
-            if op0 == '0': op0 = 'h'
-            if op1 == '0': op1 = 'h'
-            if op0 == 'r': op0 = op1
-            if op1 == 'r': op1 = _negateCategory(op0)
-            assert {op0,op1} <= {'h','v'}, (op0, op1)
+            if op0 == "0":
+                op0 = "h"
+            if op1 == "0":
+                op1 = "h"
+            if op0 == "r":
+                op0 = op1
+            if op1 == "r":
+                op1 = _negateCategory(op0)
+            assert {op0, op1} <= {"h", "v"}, (op0, op1)
 
             if len(args) % 2:
                 if op0 != op1:  # vhcurveto / hvcurveto
-                    if (op0 == 'h') ^ (len(args) % 8 == 1):
+                    if (op0 == "h") ^ (len(args) % 8 == 1):
                         # Swap last two args order
                         args = args[:-2] + args[-1:] + args[-2:-1]
                 else:  # hhcurveto / vvcurveto
-                    if op0 == 'h':  # hhcurveto
+                    if op0 == "h":  # hhcurveto
                         # Swap first two args order
                         args = args[1:2] + args[:1] + args[2:]
 
-            commands[i] = op0+op1+'curveto', args
+            commands[i] = op0 + op1 + "curveto", args
             continue
 
     # 7. For any series of args which are blend lists, convert the series to a single blend arg.
@@ -732,36 +796,55 @@ def specializeCommands(commands,
 
     return commands
 
 
 def specializeProgram(program, getNumRegions=None, **kwargs):
-    return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs))
+    return commandsToProgram(
+        specializeCommands(programToCommands(program, getNumRegions), **kwargs)
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import sys
 
     if len(sys.argv) == 1:
         import doctest
 
         sys.exit(doctest.testmod().failed)
 
     import argparse
 
     parser = argparse.ArgumentParser(
-        "fonttools cffLib.specialer", description="CFF CharString generalizer/specializer")
+        "fonttools cffLib.specialer",
+        description="CFF CharString generalizer/specializer",
+    )
+    parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
     parser.add_argument(
-        "program", metavar="command", nargs="*", help="Commands.")
-    parser.add_argument(
-        "--num-regions", metavar="NumRegions", nargs="*", default=None,
-        help="Number of variable-font regions for blend opertaions.")
+        "--num-regions",
+        metavar="NumRegions",
+        nargs="*",
+        default=None,
+        help="Number of variable-font regions for blend opertaions.",
+    )
 
     options = parser.parse_args(sys.argv[1:])
 
-    getNumRegions = None if options.num_regions is None else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
+    getNumRegions = (
+        None
+        if options.num_regions is None
+        else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
+    )
 
     program = stringToProgram(options.program)
-    print("Program:"); print(programToString(program))
+    print("Program:")
+    print(programToString(program))
     commands = programToCommands(program, getNumRegions)
-    print("Commands:"); print(commands)
+    print("Commands:")
+    print(commands)
     program2 = commandsToProgram(commands)
-    print("Program from commands:"); print(programToString(program2))
+    print("Program from commands:")
+    print(programToString(program2))
     assert program == program2
-    print("Generalized program:"); print(programToString(generalizeProgram(program, getNumRegions)))
-    print("Specialized program:"); print(programToString(specializeProgram(program, getNumRegions)))
+    print("Generalized program:")
+    print(programToString(generalizeProgram(program, getNumRegions)))
+    print("Specialized program:")
+    print(programToString(specializeProgram(program, getNumRegions)))
@@ -16,9 +16,11 @@ from functools import reduce
 class missingdict(dict):
     def __init__(self, missing_func):
         self.missing_func = missing_func
+
     def __missing__(self, v):
         return self.missing_func(v)
 
+
 def cumSum(f, op=add, start=0, decreasing=False):
 
     keys = sorted(f.keys())
@@ -42,9 +44,10 @@ def cumSum(f, op=add, start=0, decreasing=False):
 
     return out
 
+
 def byteCost(widths, default, nominal):
 
-    if not hasattr(widths, 'items'):
+    if not hasattr(widths, "items"):
         d = defaultdict(int)
         for w in widths:
             d[w] += 1
@@ -52,7 +55,8 @@ def byteCost(widths, default, nominal):
 
     cost = 0
     for w, freq in widths.items():
-        if w == default: continue
+        if w == default:
+            continue
         diff = abs(w - nominal)
         if diff <= 107:
             cost += freq
@@ -98,7 +102,7 @@ def optimizeWidths(widths):
 
     This algorithm is linear in UPEM+numGlyphs."""
 
-    if not hasattr(widths, 'items'):
+    if not hasattr(widths, "items"):
         d = defaultdict(int)
         for w in widths:
             d[w] += 1
@@ -115,13 +119,21 @@ def optimizeWidths(widths):
     cumMaxD = cumSum(widths, op=max, decreasing=True)
 
     # Cost per nominal choice, without default consideration.
-    nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x-108] + cumFrqU[x-1132]*3)
-    nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x+108] + cumFrqD[x+1132]*3)
+    nomnCostU = missingdict(
+        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
+    )
+    nomnCostD = missingdict(
+        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
+    )
     nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
 
     # Cost-saving per nominal choice, by best default choice.
-    dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x-108]*2, cumMaxU[x-1132]*5))
-    dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x+108]*2, cumMaxD[x+1132]*5))
+    dfltCostU = missingdict(
+        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
+    )
+    dfltCostD = missingdict(
+        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
+    )
     dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
 
     # Combined cost per nominal choice.
@@ -150,34 +162,48 @@ def optimizeWidths(widths):
 
     return default, nominal
 
 
 def main(args=None):
     """Calculate optimum defaultWidthX/nominalWidthX values"""
 
     import argparse
 
     parser = argparse.ArgumentParser(
         "fonttools cffLib.width",
         description=main.__doc__,
     )
-    parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
-        help="Input TTF files")
-    parser.add_argument('-b', '--brute-force', dest="brute", action="store_true",
-        help="Use brute-force approach (VERY slow)")
+    parser.add_argument(
+        "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
+    )
+    parser.add_argument(
+        "-b",
+        "--brute-force",
+        dest="brute",
+        action="store_true",
+        help="Use brute-force approach (VERY slow)",
+    )
 
     args = parser.parse_args(args)
 
     for fontfile in args.inputs:
         font = TTFont(fontfile)
-        hmtx = font['hmtx']
+        hmtx = font["hmtx"]
         widths = [m[0] for m in hmtx.metrics.values()]
         if args.brute:
             default, nominal = optimizeWidthsBruteforce(widths)
         else:
             default, nominal = optimizeWidths(widths)
-        print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)))
+        print(
+            "glyphs=%d default=%d nominal=%d byteCost=%d"
+            % (len(widths), default, nominal, byteCost(widths, default, nominal))
+        )
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     import sys
 
     if len(sys.argv) == 1:
         import doctest
 
         sys.exit(doctest.testmod().failed)
     main()
@@ -1,3 +1,2 @@
-
 class ColorLibError(Exception):
     pass
@@ -67,9 +67,7 @@ def _split_format(cls, source):
     assert isinstance(
         fmt, collections.abc.Hashable
     ), f"{cls} Format is not hashable: {fmt!r}"
-    assert (
-        fmt in cls.convertersByName
-    ), f"{cls} invalid Format: {fmt!r}"
+    assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
 
     return fmt, remainder
 
@@ -6,44 +6,52 @@ import timeit
 
 MAX_ERR = 5
 
 
 def generate_curve():
     return [
         tuple(float(random.randint(0, 2048)) for coord in range(2))
-        for point in range(4)]
+        for point in range(4)
+    ]
 
 
 def setup_curve_to_quadratic():
     return generate_curve(), MAX_ERR
 
 
 def setup_curves_to_quadratic():
     num_curves = 3
-    return (
-        [generate_curve() for curve in range(num_curves)],
-        [MAX_ERR] * num_curves)
+    return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
 
 
 def run_benchmark(
-        benchmark_module, module, function, setup_suffix='', repeat=5, number=1000):
-    setup_func = 'setup_' + function
+    benchmark_module, module, function, setup_suffix="", repeat=5, number=1000
+):
+    setup_func = "setup_" + function
     if setup_suffix:
-        print('%s with %s:' % (function, setup_suffix), end='')
-        setup_func += '_' + setup_suffix
+        print("%s with %s:" % (function, setup_suffix), end="")
+        setup_func += "_" + setup_suffix
     else:
-        print('%s:' % function, end='')
+        print("%s:" % function, end="")
 
     def wrapper(function, setup_func):
         function = globals()[function]
         setup_func = globals()[setup_func]
 
         def wrapped():
             return function(*setup_func())
 
         return wrapped
 
     results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
-    print('\t%5.1fus' % (min(results) * 1000000. / number))
+    print("\t%5.1fus" % (min(results) * 1000000.0 / number))
 
 
 def main():
     """Benchmark the cu2qu algorithm performance."""
-    run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
-    run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
+    run_benchmark("cu2qu.benchmark", "cu2qu", "curve_to_quadratic")
+    run_benchmark("cu2qu.benchmark", "cu2qu", "curves_to_quadratic")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     random.seed(1)
     main()
@@ -37,7 +37,7 @@ def open_ufo(path):

def _font_to_quadratic(input_path, output_path=None, **kwargs):
    ufo = open_ufo(input_path)
    logger.info("Converting curves for %s", input_path)
    if font_to_quadratic(ufo, **kwargs):
        logger.info("Saving %s", output_path)
        if output_path:
@@ -67,13 +67,13 @@ def _copytree(input_path, output_path):

def main(args=None):
    """Convert a UFO font from cubic to quadratic curves"""
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
@@ -81,19 +81,21 @@ def main(args=None):
        type=float,
        metavar="ERROR",
        default=None,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="do not reverse the contour direction",
    )

    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility",
    )
    mode_parser.add_argument(
        "-j",
@@ -103,7 +105,8 @@ def main(args=None):
        default=1,
        const=_cpu_count(),
        metavar="N",
        help="Convert using N multiple processes (default: %(default)s)",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
@@ -111,14 +114,18 @@ def main(args=None):
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=(
            "output filename for the converted UFO. By default fonts are "
            "modified in place. This only works with a single input."
        ),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs",
    )

    options = parser.parse_args(args)
@@ -143,8 +150,7 @@ def main(args=None):
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
@@ -152,12 +158,14 @@ def main(args=None):
        # save in-place
        output_paths = [None] * len(options.infiles)

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        reverse_direction=options.reverse_direction,
    )

    if options.interpolatable:
        logger.info("Converting curves compatibly")
        ufos = [open_ufo(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, **kwargs):
            for ufo, output_path in zip(ufos, output_paths):
@@ -171,11 +179,10 @@ def main(args=None):
            if output_path:
                _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
        if jobs > 1:
            func = partial(_font_to_quadratic, **kwargs)
            logger.info("Running %d parallel processes", jobs)
            with closing(mp.Pool(jobs)) as pool:
                pool.starmap(func, zip(options.infiles, output_paths))
        else:
@@ -26,7 +26,7 @@ import math
from .errors import Error as Cu2QuError, ApproxNotFoundError


__all__ = ["curve_to_quadratic", "curves_to_quadratic"]

MAX_N = 100
@@ -61,7 +61,9 @@ def dot(v1, v2):
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
    _1 = d
    _2 = (c / 3.0) + d
|
|||||||
|
|
||||||
@cython.cfunc
|
@cython.cfunc
|
||||||
@cython.inline
|
@cython.inline
|
||||||
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
|
@cython.locals(
|
||||||
|
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
|
||||||
|
)
|
||||||
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
|
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
|
||||||
def calc_cubic_parameters(p0, p1, p2, p3):
|
def calc_cubic_parameters(p0, p1, p2, p3):
|
||||||
c = (p1 - p0) * 3.0
|
c = (p1 - p0) * 3.0
|
||||||
@ -83,7 +87,9 @@ def calc_cubic_parameters(p0, p1, p2, p3):
|
|||||||
|
|
||||||
|
|
||||||
@cython.cfunc
|
@cython.cfunc
|
||||||
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
|
@cython.locals(
|
||||||
|
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
|
||||||
|
)
|
||||||
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
|
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
|
||||||
"""Split a cubic Bezier into n equal parts.
|
"""Split a cubic Bezier into n equal parts.
|
||||||
|
|
||||||
@@ -115,10 +121,20 @@ def split_cubic_into_n_iter(p0, p1, p2, p3, n):
    return _split_cubic_into_n_gen(p0, p1, p2, p3, n)


@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
    a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
    a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
    dt = 1 / n
|
|||||||
yield calc_cubic_points(a1, b1, c1, d1)
|
yield calc_cubic_points(a1, b1, c1, d1)
|
||||||
|
|
||||||
|
|
||||||
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
|
@cython.locals(
|
||||||
|
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
|
||||||
|
)
|
||||||
@cython.locals(mid=cython.complex, deriv3=cython.complex)
|
@cython.locals(mid=cython.complex, deriv3=cython.complex)
|
||||||
def split_cubic_into_two(p0, p1, p2, p3):
|
def split_cubic_into_two(p0, p1, p2, p3):
|
||||||
"""Split a cubic Bezier into two equal parts.
|
"""Split a cubic Bezier into two equal parts.
|
||||||
@ -152,14 +170,27 @@ def split_cubic_into_two(p0, p1, p2, p3):
|
|||||||
tuple: Two cubic Beziers (each expressed as a tuple of four complex
|
tuple: Two cubic Beziers (each expressed as a tuple of four complex
|
||||||
values).
|
values).
|
||||||
"""
|
"""
|
||||||
mid = (p0 + 3 * (p1 + p2) + p3) * .125
|
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
|
||||||
deriv3 = (p3 + p2 - p1 - p0) * .125
|
deriv3 = (p3 + p2 - p1 - p0) * 0.125
|
||||||
return ((p0, (p0 + p1) * .5, mid - deriv3, mid),
|
return (
|
||||||
(mid, mid + deriv3, (p2 + p3) * .5, p3))
|
(p0, (p0 + p1) * 0.5, mid - deriv3, mid),
|
||||||
|
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double)
|
@cython.locals(
|
||||||
@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex)
|
p0=cython.complex,
|
||||||
|
p1=cython.complex,
|
||||||
|
p2=cython.complex,
|
||||||
|
p3=cython.complex,
|
||||||
|
_27=cython.double,
|
||||||
|
)
|
||||||
|
@cython.locals(
|
||||||
|
mid1=cython.complex,
|
||||||
|
deriv1=cython.complex,
|
||||||
|
mid2=cython.complex,
|
||||||
|
deriv2=cython.complex,
|
||||||
|
)
|
||||||
def split_cubic_into_three(p0, p1, p2, p3, _27=1 / 27):
|
def split_cubic_into_three(p0, p1, p2, p3, _27=1 / 27):
|
||||||
"""Split a cubic Bezier into three equal parts.
|
"""Split a cubic Bezier into three equal parts.
|
||||||
|
|
||||||
@@ -181,13 +212,21 @@ def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
    deriv1 = (p3 + 3 * p2 - 4 * p0) * _27
    mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * _27
    deriv2 = (4 * p3 - 3 * p1 - p0) * _27
    return (
        (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
        (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
        (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
    )


@cython.returns(cython.complex)
@cython.locals(
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
    """Approximate a cubic Bezier using a quadratic one.
@@ -235,7 +274,13 @@ def calc_intersect(a, b, c, d):
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.
@@ -260,17 +305,24 @@ def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)


@cython.cfunc
@cython.locals(tolerance=cython.double, _2_3=cython.double)
@cython.locals(
    q1=cython.complex,
    c0=cython.complex,
    c1=cython.complex,
    c2=cython.complex,
    c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance, _2_3=2 / 3):
    """Approximate a cubic Bezier with a single quadratic within a given tolerance.
@@ -294,10 +346,7 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
    c3 = cubic[3]
    c1 = c0 + (q1 - c0) * _2_3
    c2 = c3 + (q1 - c3) * _2_3
    if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
        return None
    return c0, q1, c3
@@ -305,8 +354,16 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double)
@cython.locals(i=cython.int)
@cython.locals(
    c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
    q0=cython.complex,
    q1=cython.complex,
    next_q1=cython.complex,
    q2=cython.complex,
    d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, _2_3=2 / 3):
    """Approximate a cubic Bezier curve with a spline of n quadratics.
@@ -347,7 +404,7 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
            next_cubic = next(cubics)
            next_q1 = cubic_approx_control(i / (n - 1), *next_cubic)
            spline.append(next_q1)
            q2 = (q1 + next_q1) * 0.5
        else:
            q2 = c3
@@ -355,12 +412,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
        d0 = d1
        d1 = q2 - c3

        if abs(d1) > tolerance or not cubic_farthest_fit_inside(
            d0, q0 + (q1 - q0) * _2_3 - c1, q2 + (q1 - q2) * _2_3 - c2, d1, tolerance
        ):
            return None
    spline.append(cubic[3])
@@ -394,7 +448,6 @@ def curve_to_quadratic(curve, max_err):
    raise ApproxNotFoundError(curve)


@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
def curves_to_quadratic(curves, max_errors):
    """Return quadratic Bezier splines approximating the input cubic Beziers.
@@ -448,5 +501,3 @@ def curves_to_quadratic(curves, max_errors):
            return [[(s.real, s.imag) for s in spline] for spline in splines]

    raise ApproxNotFoundError(curves)
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.


class Error(Exception):
    """Base Cu2Qu exception class for all other errors."""
@@ -30,12 +30,15 @@ from fontTools.pens.reverseContourPen import ReverseContourPen

from . import curves_to_quadratic
from .errors import (
    UnequalZipLengthsError,
    IncompatibleSegmentNumberError,
    IncompatibleSegmentTypesError,
    IncompatibleGlyphsError,
    IncompatibleFontsError,
)


__all__ = ["fonts_to_quadratic", "font_to_quadratic"]

# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
@@ -47,6 +50,8 @@ logger = logging.getLogger(__name__)

_zip = zip


def zip(*args):
    """Ensure each argument to zip has the same length. Also make sure a list is
    returned for python 2/3 compatibility.
@@ -69,27 +74,27 @@ class GetSegmentsPen(AbstractPen):
        self.segments = []

    def _add_segment(self, tag, *args):
        if tag in ["move", "line", "qcurve", "curve"]:
            self._last_pt = args[-1]
        self.segments.append((tag, args))

    def moveTo(self, pt):
        self._add_segment("move", pt)

    def lineTo(self, pt):
        self._add_segment("line", pt)

    def qCurveTo(self, *points):
        self._add_segment("qcurve", self._last_pt, *points)

    def curveTo(self, *points):
        self._add_segment("curve", self._last_pt, *points)

    def closePath(self):
        self._add_segment("close")

    def endPath(self):
        self._add_segment("end")

    def addComponent(self, glyphName, transformation):
        pass
@@ -122,17 +127,17 @@ def _set_segments(glyph, segments, reverse_direction):
    if reverse_direction:
        pen = ReverseContourPen(pen)
    for tag, args in segments:
        if tag == "move":
            pen.moveTo(*args)
        elif tag == "line":
            pen.lineTo(*args)
        elif tag == "curve":
            pen.curveTo(*args[1:])
        elif tag == "qcurve":
            pen.qCurveTo(*args[1:])
        elif tag == "close":
            pen.closePath()
        elif tag == "end":
            pen.endPath()
        else:
            raise AssertionError('Unhandled segment type "%s"' % tag)
@@ -141,16 +146,16 @@ def _set_segments(glyph, segments, reverse_direction):

def _segments_to_quadratic(segments, max_err, stats):
    """Return quadratic approximations of cubic segments."""

    assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"

    new_points = curves_to_quadratic([s[1] for s in segments], max_err)
    n = len(new_points[0])
    assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"

    spline_length = str(n - 2)
    stats[spline_length] = stats.get(spline_length, 0) + 1

    return [("qcurve", p) for p in new_points]


def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
@@ -176,7 +181,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
        tag = segments[0][0]
        if not all(s[0] == tag for s in segments[1:]):
            incompatible[i] = [s[0] for s in segments]
        elif tag == "curve":
            segments = _segments_to_quadratic(segments, max_err, stats)
            glyphs_modified = True
        new_segments_by_location.append(segments)
@@ -191,8 +196,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
    return glyphs_modified


def glyphs_to_quadratic(glyphs, max_err=None, reverse_direction=False, stats=None):
    """Convert the curves of a set of compatible glyphs to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
@@ -220,8 +224,14 @@ def glyphs_to_quadratic(

def fonts_to_quadratic(
    fonts,
    max_err_em=None,
    max_err=None,
    reverse_direction=False,
    stats=None,
    dump_stats=False,
    remember_curve_type=True,
):
    """Convert the curves of a collection of fonts to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
@@ -258,7 +268,7 @@ def fonts_to_quadratic(
        stats = {}

    if max_err_em and max_err:
        raise TypeError("Only one of max_err and max_err_em can be specified.")
    if not (max_err_em or max_err):
        max_err_em = DEFAULT_MAX_ERR
@@ -270,8 +280,7 @@ def fonts_to_quadratic(
    if isinstance(max_err_em, (list, tuple)):
        assert len(fonts) == len(max_err_em)
        max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
    elif max_err_em:
        max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
@@ -286,7 +295,8 @@ def fonts_to_quadratic(
            cur_max_errors.append(error)
        try:
            modified |= _glyphs_to_quadratic(
                glyphs, cur_max_errors, reverse_direction, stats
            )
        except IncompatibleGlyphsError as exc:
            logger.error(exc)
            glyph_errors[name] = exc
@@ -296,8 +306,10 @@ def fonts_to_quadratic(
    if modified and dump_stats:
        spline_lengths = sorted(stats.keys())
        logger.info(
            "New spline lengths: %s"
            % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
        )

    if remember_curve_type:
        for font in fonts:
File diff suppressed because it is too large
@@ -1,36 +1,258 @@
MacRoman = [
    "NUL", "Eth", "eth", "Lslash", "lslash", "Scaron", "scaron", "Yacute",
    "yacute", "HT", "LF", "Thorn", "thorn", "CR", "Zcaron", "zcaron", "DLE", "DC1",
    "DC2", "DC3", "DC4", "onehalf", "onequarter", "onesuperior", "threequarters",
    "threesuperior", "twosuperior", "brokenbar", "minus", "multiply", "RS", "US",
    "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand",
    "quotesingle", "parenleft", "parenright", "asterisk", "plus", "comma",
    "hyphen", "period", "slash", "zero", "one", "two", "three", "four", "five",
    "six", "seven", "eight", "nine", "colon", "semicolon", "less", "equal",
    "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
    "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
    "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
    "grave", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
    "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar",
    "braceright", "asciitilde", "DEL", "Adieresis", "Aring", "Ccedilla", "Eacute",
    "Ntilde", "Odieresis", "Udieresis", "aacute", "agrave", "acircumflex",
    "adieresis", "atilde", "aring", "ccedilla", "eacute", "egrave", "ecircumflex",
    "edieresis", "iacute", "igrave", "icircumflex", "idieresis", "ntilde",
    "oacute", "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave",
    "ucircumflex", "udieresis", "dagger", "degree", "cent", "sterling", "section",
    "bullet", "paragraph", "germandbls", "registered", "copyright", "trademark",
    "acute", "dieresis", "notequal", "AE", "Oslash", "infinity", "plusminus",
    "lessequal", "greaterequal", "yen", "mu", "partialdiff", "summation",
    "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", "ae",
    "oslash", "questiondown", "exclamdown", "logicalnot", "radical", "florin",
    "approxequal", "Delta", "guillemotleft", "guillemotright", "ellipsis",
    "nbspace", "Agrave", "Atilde", "Otilde", "OE", "oe", "endash", "emdash",
    "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge",
    "ydieresis", "Ydieresis", "fraction", "currency", "guilsinglleft",
    "guilsinglright", "fi", "fl", "daggerdbl", "periodcentered", "quotesinglbase",
    "quotedblbase", "perthousand", "Acircumflex", "Ecircumflex", "Aacute",
    "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute",
    "Ocircumflex", "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi",
    "circumflex", "tilde", "macron", "breve", "dotaccent", "ring", "cedilla",
    "hungarumlaut", "ogonek", "caron",
]
@@ -1,48 +1,258 @@
StandardEncoding = [
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", "space", "exclam", "quotedbl",
    "numbersign", "dollar", "percent", "ampersand",
    "quoteright", "parenleft", "parenright", "asterisk", "plus",
    "comma", "hyphen", "period", "slash", "zero", "one", "two",
    "three", "four", "five", "six", "seven", "eight", "nine",
    "colon", "semicolon", "less", "equal", "greater",
    "question", "at", "A", "B", "C", "D", "E", "F", "G", "H",
    "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
    "U", "V", "W", "X", "Y", "Z", "bracketleft", "backslash",
    "bracketright", "asciicircum", "underscore", "quoteleft",
    "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
    "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
    "y", "z", "braceleft", "bar", "braceright", "asciitilde",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", "exclamdown",
    "cent", "sterling", "fraction", "yen", "florin", "section",
    "currency", "quotesingle", "quotedblleft", "guillemotleft",
    "guilsinglleft", "guilsinglright", "fi", "fl", ".notdef",
    "endash", "dagger", "daggerdbl", "periodcentered",
    ".notdef", "paragraph", "bullet", "quotesinglbase",
    "quotedblbase", "quotedblright", "guillemotright",
    "ellipsis", "perthousand", ".notdef", "questiondown",
    ".notdef", "grave", "acute", "circumflex", "tilde",
    "macron", "breve", "dotaccent", "dieresis", ".notdef",
    "ring", "cedilla", ".notdef", "hungarumlaut", "ogonek",
    "caron", "emdash", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",
    ".notdef", ".notdef", ".notdef", "AE", ".notdef",
    "ordfeminine", ".notdef", ".notdef", ".notdef", ".notdef",
    "Lslash", "Oslash", "OE", "ordmasculine", ".notdef",
    ".notdef", ".notdef", ".notdef", ".notdef", "ae", ".notdef",
    ".notdef", ".notdef", "dotlessi", ".notdef", ".notdef",
    "lslash", "oslash", "oe", "germandbls", ".notdef",
    ".notdef", ".notdef", ".notdef",
]
@@ -4,15 +4,17 @@ but missing from Python. See https://github.com/fonttools/fonttools/issues/236
import codecs
import encodings


class ExtendCodec(codecs.Codec):
    def __init__(self, name, base_encoding, mapping):
        self.name = name
        self.base_encoding = base_encoding
        self.mapping = mapping
        self.reverse = {v: k for k, v in mapping.items()}
        self.max_len = max(len(v) for v in mapping.values())
        self.info = codecs.CodecInfo(
            name=self.name, encode=self.encode, decode=self.decode
        )
        codecs.register_error(name, self.error)

    def _map(self, mapper, output_type, exc_type, input, errors):
@@ -33,10 +35,10 @@ class ExtendCodec(codecs.Codec):
            input = input[pos:]
        return out, length

    def encode(self, input, errors="strict"):
        return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)

    def decode(self, input, errors="strict"):
        return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)

    def error(self, e):
@@ -55,7 +57,9 @@ class ExtendCodec(codecs.Codec):

_extended_encodings = {
    "x_mac_japanese_ttx": (
        "shift_jis",
        {
            b"\xFC": chr(0x007C),
            b"\x7E": chr(0x007E),
            b"\x80": chr(0x005C),
@@ -63,39 +67,50 @@ _extended_encodings = {
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_trad_chinese_ttx": (
        "big5",
        {
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_korean_ttx": (
        "euc_kr",
        {
            b"\x80": chr(0x00A0),
            b"\x81": chr(0x20A9),
            b"\x82": chr(0x2014),
            b"\x83": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_simp_chinese_ttx": (
        "gb2312",
        {
            b"\x80": chr(0x00FC),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
}

_cache = {}


def search_function(name):
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name in _extended_encodings:
        if name not in _cache:
            base_encoding, mapping = _extended_encodings[name]
            assert name[-4:] == "_ttx"
            # Python 2 didn't have any of the encodings that we are implementing
            # in this file. Python 3 added aliases for the East Asian ones, mapping
            # them "temporarily" to the same base encoding as us, with a comment
@@ -116,4 +131,5 @@ def search_function(name):
    return None


codecs.register(search_function)
@@ -768,8 +768,8 @@ class Builder(object):
            varidx_map = store.optimize()

            gdef.remap_device_varidxes(varidx_map)
            if "GPOS" in self.font:
                self.font["GPOS"].table.remap_device_varidxes(varidx_map)
        VariableScalar.clear_cache()
        if any(
            (
@@ -1339,7 +1339,9 @@ class Builder(object):
    # GSUB 5/6
    def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
        if not all(glyphs) or not all(prefix) or not all(suffix):
            raise FeatureLibError(
                "Empty glyph class in contextual substitution", location
            )
        lookup = self.get_lookup_(location, ChainContextSubstBuilder)
        lookup.rules.append(
            ChainContextualRule(
@@ -1349,7 +1351,9 @@ class Builder(object):
    def add_single_subst_chained_(self, location, prefix, suffix, mapping):
        if not mapping or not all(prefix) or not all(suffix):
            raise FeatureLibError(
                "Empty glyph class in contextual substitution", location
            )
        # https://github.com/fonttools/fonttools/issues/512
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        sub = chain.find_chainable_single_subst(set(mapping.keys()))
@@ -1377,8 +1381,12 @@ class Builder(object):
        lookup = self.get_lookup_(location, SinglePosBuilder)
        for glyphs, value in pos:
            if not glyphs:
                raise FeatureLibError(
                    "Empty glyph class in positioning rule", location
                )
            otValueRecord = self.makeOpenTypeValueRecord(
                location, value, pairPosContext=False
            )
            for glyph in glyphs:
                try:
                    lookup.add_pos(location, glyph, otValueRecord)
@@ -1388,9 +1396,7 @@ class Builder(object):
 # GPOS 2
 def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
 if not glyphclass1 or not glyphclass2:
-raise FeatureLibError(
-"Empty glyph class in positioning rule", location
-)
+raise FeatureLibError("Empty glyph class in positioning rule", location)
 lookup = self.get_lookup_(location, PairPosBuilder)
 v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
 v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
@@ -1458,7 +1464,9 @@ class Builder(object):
 # GPOS 7/8
 def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
 if not all(glyphs) or not all(prefix) or not all(suffix):
-raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
+raise FeatureLibError(
+"Empty glyph class in contextual positioning rule", location
+)
 lookup = self.get_lookup_(location, ChainContextPosBuilder)
 lookup.rules.append(
 ChainContextualRule(
@@ -1468,7 +1476,9 @@ class Builder(object):

 def add_single_pos_chained_(self, location, prefix, suffix, pos):
 if not pos or not all(prefix) or not all(suffix):
-raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
+raise FeatureLibError(
+"Empty glyph class in contextual positioning rule", location
+)
 # https://github.com/fonttools/fonttools/issues/514
 chain = self.get_lookup_(location, ChainContextPosBuilder)
 targets = []
@@ -1479,7 +1489,9 @@ class Builder(object):
 if value is None:
 subs.append(None)
 continue
-otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
+otValue = self.makeOpenTypeValueRecord(
+location, value, pairPosContext=False
+)
 sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
 if sub is None:
 sub = self.get_chained_lookup_(location, SinglePosBuilder)
@@ -1498,7 +1510,9 @@ class Builder(object):
 for markClassDef in markClass.definitions:
 for mark in markClassDef.glyphs.glyphSet():
 if mark not in lookupBuilder.marks:
-otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor)
+otMarkAnchor = self.makeOpenTypeAnchor(
+location, markClassDef.anchor
+)
 lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
 else:
 existingMarkClass = lookupBuilder.marks[mark][0]
@@ -1592,9 +1606,13 @@ class Builder(object):
 if not isinstance(getattr(anchor, dim), VariableScalar):
 continue
 if getattr(anchor, dim + "DeviceTable") is not None:
-raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
+raise FeatureLibError(
+"Can't define a device coordinate and variable scalar", location
+)
 if not self.varstorebuilder:
-raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
+raise FeatureLibError(
+"Can't define a variable scalar in a non-variable font", location
+)
 varscalar = getattr(anchor, dim)
 varscalar.axes = self.axes
 default, index = varscalar.add_to_variation_store(self.varstorebuilder)
@@ -1606,7 +1624,9 @@ class Builder(object):
 deviceY = buildVarDevTable(index)
 variable = True

-otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
+otlanchor = otl.buildAnchor(
+anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY
+)
 if variable:
 otlanchor.Format = 3
 return otlanchor
@@ -1617,7 +1637,6 @@ class Builder(object):
 if not name.startswith("Reserved")
 }

-
 def makeOpenTypeValueRecord(self, location, v, pairPosContext):
 """ast.ValueRecord --> otBase.ValueRecord"""
 if not v:
@@ -1635,9 +1654,14 @@ class Builder(object):
 otDeviceName = otName[0:4] + "Device"
 feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
 if getattr(v, feaDeviceName):
-raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
+raise FeatureLibError(
+"Can't define a device coordinate and variable scalar", location
+)
 if not self.varstorebuilder:
-raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
+raise FeatureLibError(
+"Can't define a variable scalar in a non-variable font",
+location,
+)
 val.axes = self.axes
 default, index = val.add_to_variation_store(self.varstorebuilder)
 vr[otName] = default
@@ -3,6 +3,7 @@ from typing import NamedTuple
 LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
 LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"

+
 class LookupDebugInfo(NamedTuple):
 """Information about where a lookup came from, to be embedded in a font"""

@@ -134,7 +134,8 @@ class Parser(object):
 ]
 raise FeatureLibError(
 "The following glyph names are referenced but are missing from the "
-"glyph set:\n" + ("\n".join(error)), None
+"glyph set:\n" + ("\n".join(error)),
+None,
 )
 return self.doc_

@@ -396,7 +397,8 @@ class Parser(object):
 self.expect_symbol_("-")
 range_end = self.expect_cid_()
 self.check_glyph_name_in_glyph_set(
-f"cid{range_start:05d}", f"cid{range_end:05d}",
+f"cid{range_start:05d}",
+f"cid{range_end:05d}",
 )
 glyphs.add_cid_range(
 range_start,
@@ -696,7 +698,9 @@ class Parser(object):
 location = self.cur_token_location_
 glyphs = self.parse_glyphclass_(accept_glyphname=True)
 if not glyphs.glyphSet():
-raise FeatureLibError("Empty glyph class in mark class definition", location)
+raise FeatureLibError(
+"Empty glyph class in mark class definition", location
+)
 anchor = self.parse_anchor_()
 name = self.expect_class_name_()
 self.expect_symbol_(";")
@@ -4,7 +4,11 @@

 from fontTools import ttLib
 import fontTools.merge.base
-from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings
+from fontTools.merge.cmap import (
+computeMegaGlyphOrder,
+computeMegaCmap,
+renameCFFCharStrings,
+)
 from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
 from fontTools.merge.options import Options
 import fontTools.merge.tables
@@ -57,7 +61,7 @@ class Merger(object):
 fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
 for font, fontfile in zip(fonts, fontfiles):
 font._merger__fontfile = fontfile
-font._merger__name = font['name'].getDebugName(4)
+font._merger__name = font["name"].getDebugName(4)
 return fonts

 def merge(self, fontfiles):
@@ -84,10 +88,10 @@ class Merger(object):
 fonts = self._openFonts(fontfiles)
 for font, glyphOrder in zip(fonts, glyphOrders):
 font.setGlyphOrder(glyphOrder)
-if 'CFF ' in font:
-renameCFFCharStrings(self, glyphOrder, font['CFF '])
+if "CFF " in font:
+renameCFFCharStrings(self, glyphOrder, font["CFF "])

-cmaps = [font['cmap'] for font in fonts]
+cmaps = [font["cmap"] for font in fonts]
 self.duplicateGlyphsPerFont = [{} for _ in fonts]
 computeMegaCmap(self, cmaps)

@@ -100,7 +104,7 @@ class Merger(object):
 self.fonts = fonts

 allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
-allTags.remove('GlyphOrder')
+allTags.remove("GlyphOrder")

 for tag in allTags:
 if tag in self.options.drop_tables:
@@ -131,16 +135,21 @@ class Merger(object):
 # Right now we don't use self at all. Will use in the future
 # for options and logging.

-allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
+allKeys = set.union(
+set(),
+*(vars(table).keys() for table in tables if table is not NotImplemented),
+)
 for key in allKeys:
 try:
 mergeLogic = logic[key]
 except KeyError:
 try:
-mergeLogic = logic['*']
+mergeLogic = logic["*"]
 except KeyError:
-raise Exception("Don't know how to merge key %s of class %s" %
-(key, returnTable.__class__.__name__))
+raise Exception(
+"Don't know how to merge key %s of class %s"
+% (key, returnTable.__class__.__name__)
+)
 if mergeLogic is NotImplemented:
 continue
 value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
@@ -161,11 +170,8 @@ class Merger(object):
 font["OS/2"].recalcAvgCharWidth(font)


-__all__ = [
-'Options',
-'Merger',
-'main'
-]
+__all__ = ["Options", "Merger", "main"]

 @timer("make one with everything (TOTAL TIME)")
 def main(args=None):
@@ -176,11 +182,11 @@ def main(args=None):
 args = sys.argv[1:]

 options = Options()
-args = options.parse_opts(args, ignore_unknown=['output-file'])
-outfile = 'merged.ttf'
+args = options.parse_opts(args, ignore_unknown=["output-file"])
+outfile = "merged.ttf"
 fontfiles = []
 for g in args:
-if g.startswith('--output-file='):
+if g.startswith("--output-file="):
 outfile = g[14:]
 continue
 fontfiles.append(g)
|
@ -2,5 +2,5 @@ import sys
|
|||||||
from fontTools.merge import main
|
from fontTools.merge import main
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
@@ -12,19 +12,24 @@ log = logging.getLogger("fontTools.merge")
 def add_method(*clazzes, **kwargs):
 """Returns a decorator function that adds a new method to one or
 more classes."""
-allowDefault = kwargs.get('allowDefaultTable', False)
+allowDefault = kwargs.get("allowDefaultTable", False)
+
 def wrapper(method):
 done = []
 for clazz in clazzes:
-if clazz in done: continue # Support multiple names of a clazz
+if clazz in done:
+continue # Support multiple names of a clazz
 done.append(clazz)
-assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
-assert method.__name__ not in clazz.__dict__, \
-"Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
+assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
+assert (
+method.__name__ not in clazz.__dict__
+), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
 setattr(clazz, method.__name__, method)
 return None
+
 return wrapper

+
 def mergeObjects(lst):
 lst = [item for item in lst if item is not NotImplemented]
 if not lst:
@@ -46,10 +51,11 @@ def mergeObjects(lst):
 mergeLogic = logic[key]
 except KeyError:
 try:
-mergeLogic = logic['*']
+mergeLogic = logic["*"]
 except KeyError:
-raise Exception("Don't know how to merge key %s of class %s" %
-(key, clazz.__name__))
+raise Exception(
+"Don't know how to merge key %s of class %s" % (key, clazz.__name__)
+)
 if mergeLogic is NotImplemented:
 continue
 value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
@@ -60,9 +66,10 @@ def mergeObjects(lst):

 return returnTable

+
 @add_method(DefaultTable, allowDefaultTable=True)
 def merge(self, m, tables):
-if not hasattr(self, 'mergeMap'):
+if not hasattr(self, "mergeMap"):
 log.info("Don't know how to merge '%s'.", self.tableTag)
 return NotImplemented

@@ -72,5 +79,3 @@ def merge(self, m, tables):
 return m.mergeObjects(self, self.mergeMap, tables)
 else:
 return logic(tables)
-
-
|
@ -27,9 +27,14 @@ def computeMegaGlyphOrder(merger, glyphOrders):
|
|||||||
merger.glyphOrder = megaOrder = list(megaOrder.keys())
|
merger.glyphOrder = megaOrder = list(megaOrder.keys())
|
||||||
|
|
||||||
|
|
||||||
def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
|
def _glyphsAreSame(
|
||||||
advanceTolerance=.05,
|
glyphSet1,
|
||||||
advanceToleranceEmpty=.20):
|
glyphSet2,
|
||||||
|
glyph1,
|
||||||
|
glyph2,
|
||||||
|
advanceTolerance=0.05,
|
||||||
|
advanceToleranceEmpty=0.20,
|
||||||
|
):
|
||||||
pen1 = DecomposingRecordingPen(glyphSet1)
|
pen1 = DecomposingRecordingPen(glyphSet1)
|
||||||
pen2 = DecomposingRecordingPen(glyphSet2)
|
pen2 = DecomposingRecordingPen(glyphSet2)
|
||||||
g1 = glyphSet1[glyph1]
|
g1 = glyphSet1[glyph1]
|
||||||
@@ -43,11 +48,12 @@ def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
 # TODO Warn if advances not the same but within tolerance.
 if abs(g1.width - g2.width) > g1.width * tolerance:
 return False
-if hasattr(g1, 'height') and g1.height is not None:
+if hasattr(g1, "height") and g1.height is not None:
 if abs(g1.height - g2.height) > g1.height * tolerance:
 return False
 return True

+
 # Valid (format, platformID, platEncID) triplets for cmap subtables containing
 # Unicode BMP-only and Unicode Full Repertoire semantics.
 # Cf. OpenType spec for "Platform specific encodings":
@@ -56,6 +62,7 @@ class _CmapUnicodePlatEncodings:
 BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
 FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}

+
 def computeMegaCmap(merger, cmapTables):
 """Sets merger.cmap and merger.glyphOrder."""

@@ -76,7 +83,10 @@ def computeMegaCmap(merger, cmapTables):
 log.warning(
 "Dropped cmap subtable from font '%s':\t"
 "format %2s, platformID %2s, platEncID %2s",
-fontIdx, subtable.format, subtable.platformID, subtable.platEncID
+fontIdx,
+subtable.format,
+subtable.platformID,
+subtable.platEncID,
 )
 if format12 is not None:
 chosenCmapTables.append((format12, fontIdx))
@@ -86,7 +96,7 @@ def computeMegaCmap(merger, cmapTables):
 # Build the unicode mapping
 merger.cmap = cmap = {}
 fontIndexForGlyph = {}
-glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None
+glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None

 for table, fontIdx in chosenCmapTables:
 # handle duplicates
@@ -113,7 +123,9 @@ def computeMegaCmap(merger, cmapTables):
 # Char previously mapped to oldgid but oldgid is already remapped to a different
 # gid, because of another Unicode character.
 # TODO: Try harder to do something about these.
-log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
+log.warning(
+"Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
+)


 def renameCFFCharStrings(merger, glyphOrder, cffTable):
|
@ -17,14 +17,18 @@ def mergeLookupLists(lst):
|
|||||||
# TODO Do smarter merge.
|
# TODO Do smarter merge.
|
||||||
return sumLists(lst)
|
return sumLists(lst)
|
||||||
|
|
||||||
|
|
||||||
def mergeFeatures(lst):
|
def mergeFeatures(lst):
|
||||||
assert lst
|
assert lst
|
||||||
self = otTables.Feature()
|
self = otTables.Feature()
|
||||||
self.FeatureParams = None
|
self.FeatureParams = None
|
||||||
self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
|
self.LookupListIndex = mergeLookupLists(
|
||||||
|
[l.LookupListIndex for l in lst if l.LookupListIndex]
|
||||||
|
)
|
||||||
self.LookupCount = len(self.LookupListIndex)
|
self.LookupCount = len(self.LookupListIndex)
|
||||||
return self
|
return self
|
||||||
|
|
||||||
|
|
||||||
def mergeFeatureLists(lst):
|
def mergeFeatureLists(lst):
|
||||||
d = {}
|
d = {}
|
||||||
for l in lst:
|
for l in lst:
|
||||||
@@ -41,6 +45,7 @@ def mergeFeatureLists(lst):
 ret.append(rec)
 return ret

+
 def mergeLangSyses(lst):
 assert lst

@@ -50,10 +55,13 @@ def mergeLangSyses(lst):
 self = otTables.LangSys()
 self.LookupOrder = None
 self.ReqFeatureIndex = 0xFFFF
-self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
+self.FeatureIndex = mergeFeatureLists(
+[l.FeatureIndex for l in lst if l.FeatureIndex]
+)
 self.FeatureCount = len(self.FeatureIndex)
 return self

+
 def mergeScripts(lst):
 assert lst

@@ -82,6 +90,7 @@ def mergeScripts(lst):
 self.DefaultLangSys = None
 return self

+
 def mergeScriptRecords(lst):
 d = {}
 for l in lst:
@@ -98,111 +107,124 @@ def mergeScriptRecords(lst):
 ret.append(rec)
 return ret


 otTables.ScriptList.mergeMap = {
-'ScriptCount': lambda lst: None, # TODO
-'ScriptRecord': mergeScriptRecords,
+"ScriptCount": lambda lst: None, # TODO
+"ScriptRecord": mergeScriptRecords,
 }
 otTables.BaseScriptList.mergeMap = {
-'BaseScriptCount': lambda lst: None, # TODO
+"BaseScriptCount": lambda lst: None, # TODO
 # TODO: Merge duplicate entries
-'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
+"BaseScriptRecord": lambda lst: sorted(
+sumLists(lst), key=lambda s: s.BaseScriptTag
+),
 }

 otTables.FeatureList.mergeMap = {
-'FeatureCount': sum,
-'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
+"FeatureCount": sum,
+"FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
 }

 otTables.LookupList.mergeMap = {
-'LookupCount': sum,
-'Lookup': sumLists,
+"LookupCount": sum,
+"Lookup": sumLists,
 }

 otTables.Coverage.mergeMap = {
-'Format': min,
-'glyphs': sumLists,
+"Format": min,
+"glyphs": sumLists,
 }

 otTables.ClassDef.mergeMap = {
-'Format': min,
-'classDefs': sumDicts,
+"Format": min,
+"classDefs": sumDicts,
 }

 otTables.LigCaretList.mergeMap = {
-'Coverage': mergeObjects,
-'LigGlyphCount': sum,
-'LigGlyph': sumLists,
+"Coverage": mergeObjects,
+"LigGlyphCount": sum,
+"LigGlyph": sumLists,
 }

 otTables.AttachList.mergeMap = {
-'Coverage': mergeObjects,
-'GlyphCount': sum,
-'AttachPoint': sumLists,
+"Coverage": mergeObjects,
+"GlyphCount": sum,
+"AttachPoint": sumLists,
 }

 # XXX Renumber MarkFilterSets of lookups
 otTables.MarkGlyphSetsDef.mergeMap = {
-'MarkSetTableFormat': equal,
-'MarkSetCount': sum,
-'Coverage': sumLists,
+"MarkSetTableFormat": equal,
+"MarkSetCount": sum,
+"Coverage": sumLists,
 }

 otTables.Axis.mergeMap = {
-'*': mergeObjects,
+"*": mergeObjects,
 }

 # XXX Fix BASE table merging
 otTables.BaseTagList.mergeMap = {
-'BaseTagCount': sum,
-'BaselineTag': sumLists,
+"BaseTagCount": sum,
+"BaselineTag": sumLists,
 }

-otTables.GDEF.mergeMap = \
-otTables.GSUB.mergeMap = \
-otTables.GPOS.mergeMap = \
-otTables.BASE.mergeMap = \
-otTables.JSTF.mergeMap = \
-otTables.MATH.mergeMap = \
-{
-'*': mergeObjects,
-'Version': max,
+otTables.GDEF.mergeMap = (
+otTables.GSUB.mergeMap
+) = (
+otTables.GPOS.mergeMap
+) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
+"*": mergeObjects,
+"Version": max,
 }

-ttLib.getTableClass('GDEF').mergeMap = \
-ttLib.getTableClass('GSUB').mergeMap = \
-ttLib.getTableClass('GPOS').mergeMap = \
-ttLib.getTableClass('BASE').mergeMap = \
-ttLib.getTableClass('JSTF').mergeMap = \
-ttLib.getTableClass('MATH').mergeMap = \
-{
-'tableTag': onlyExisting(equal), # XXX clean me up
-'table': mergeObjects,
+ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
+"GSUB"
+).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
+"BASE"
+).mergeMap = ttLib.getTableClass(
+"JSTF"
+).mergeMap = ttLib.getTableClass(
+"MATH"
+).mergeMap = {
+"tableTag": onlyExisting(equal), # XXX clean me up
+"table": mergeObjects,
 }

-@add_method(ttLib.getTableClass('GSUB'))
+
+@add_method(ttLib.getTableClass("GSUB"))
 def merge(self, m, tables):

 assert len(tables) == len(m.duplicateGlyphsPerFont)
 for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
-if not dups: continue
+if not dups:
+continue
 if table is None or table is NotImplemented:
-log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
+log.warning(
+"Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
+m.fonts[i]._merger__name,
+dups,
+)
 continue

 synthFeature = None
 synthLookup = None
 for script in table.table.ScriptList.ScriptRecord:
-if script.ScriptTag == 'DFLT': continue # XXX
-for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
-if langsys is None: continue # XXX Create!
-feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
+if script.ScriptTag == "DFLT":
+continue # XXX
+for langsys in [script.Script.DefaultLangSys] + [
+l.LangSys for l in script.Script.LangSysRecord
+]:
+if langsys is None:
+continue # XXX Create!
+feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
 assert len(feature) <= 1
 if feature:
 feature = feature[0]
 else:
 if not synthFeature:
 synthFeature = otTables.FeatureRecord()
-synthFeature.FeatureTag = 'locl'
+synthFeature.FeatureTag = "locl"
 f = synthFeature.Feature = otTables.Feature()
 f.FeatureParams = None
 f.LookupCount = 0
@ -238,7 +260,9 @@ def merge(self, m, tables):
|
|||||||
DefaultTable.merge(self, m, tables)
|
DefaultTable.merge(self, m, tables)
|
||||||
return self
|
return self
|
||||||
|
|
||||||
@add_method(otTables.SingleSubst,
|
|
||||||
|
@add_method(
|
||||||
|
otTables.SingleSubst,
|
||||||
otTables.MultipleSubst,
|
otTables.MultipleSubst,
|
||||||
otTables.AlternateSubst,
|
otTables.AlternateSubst,
|
||||||
otTables.LigatureSubst,
|
otTables.LigatureSubst,
|
||||||
@ -248,29 +272,32 @@ def merge(self, m, tables):
|
|||||||
otTables.CursivePos,
|
otTables.CursivePos,
|
||||||
otTables.MarkBasePos,
|
otTables.MarkBasePos,
|
||||||
otTables.MarkLigPos,
|
otTables.MarkLigPos,
|
||||||
otTables.MarkMarkPos)
|
otTables.MarkMarkPos,
|
||||||
|
)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# Copied and trimmed down from subset.py
|
# Copied and trimmed down from subset.py
|
||||||
@add_method(otTables.ContextSubst,
|
@add_method(
|
||||||
|
otTables.ContextSubst,
|
||||||
otTables.ChainContextSubst,
|
otTables.ChainContextSubst,
|
||||||
otTables.ContextPos,
|
otTables.ContextPos,
|
||||||
otTables.ChainContextPos)
|
otTables.ChainContextPos,
|
||||||
|
)
|
||||||
def __merge_classify_context(self):
|
def __merge_classify_context(self):
|
||||||
|
|
||||||
class ContextHelper(object):
|
class ContextHelper(object):
|
||||||
def __init__(self, klass, Format):
|
def __init__(self, klass, Format):
|
||||||
if klass.__name__.endswith('Subst'):
|
if klass.__name__.endswith("Subst"):
|
||||||
Typ = 'Sub'
|
Typ = "Sub"
|
||||||
Type = 'Subst'
|
Type = "Subst"
|
||||||
else:
|
else:
|
||||||
Typ = 'Pos'
|
Typ = "Pos"
|
||||||
Type = 'Pos'
|
Type = "Pos"
|
||||||
if klass.__name__.startswith('Chain'):
|
if klass.__name__.startswith("Chain"):
|
||||||
Chain = 'Chain'
|
Chain = "Chain"
|
||||||
else:
|
else:
|
||||||
Chain = ''
|
Chain = ""
|
||||||
ChainTyp = Chain + Typ
|
ChainTyp = Chain + Typ
|
||||||
|
|
||||||
self.Typ = Typ
|
self.Typ = Typ
|
||||||
@ -278,14 +305,14 @@ def __merge_classify_context(self):
|
|||||||
self.Chain = Chain
|
self.Chain = Chain
|
||||||
self.ChainTyp = ChainTyp
|
self.ChainTyp = ChainTyp
|
||||||
|
|
||||||
self.LookupRecord = Type+'LookupRecord'
|
self.LookupRecord = Type + "LookupRecord"
|
||||||
|
|
||||||
if Format == 1:
|
if Format == 1:
|
||||||
self.Rule = ChainTyp+'Rule'
|
self.Rule = ChainTyp + "Rule"
|
||||||
self.RuleSet = ChainTyp+'RuleSet'
|
self.RuleSet = ChainTyp + "RuleSet"
|
||||||
elif Format == 2:
|
elif Format == 2:
|
||||||
self.Rule = ChainTyp+'ClassRule'
|
self.Rule = ChainTyp + "ClassRule"
|
||||||
self.RuleSet = ChainTyp+'ClassSet'
|
self.RuleSet = ChainTyp + "ClassSet"
|
||||||
|
|
||||||
if self.Format not in [1, 2, 3]:
|
if self.Format not in [1, 2, 3]:
|
||||||
return None # Don't shoot the messenger; let it go
|
return None # Don't shoot the messenger; let it go
|
||||||
@ -297,99 +324,121 @@ def __merge_classify_context(self):
|
|||||||
return self.__class__._merge__ContextHelpers[self.Format]
|
return self.__class__._merge__ContextHelpers[self.Format]
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.ContextSubst,
|
@add_method(
|
||||||
|
otTables.ContextSubst,
|
||||||
otTables.ChainContextSubst,
|
otTables.ChainContextSubst,
|
||||||
otTables.ContextPos,
|
otTables.ContextPos,
|
||||||
otTables.ChainContextPos)
|
otTables.ChainContextPos,
|
||||||
|
)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
c = self.__merge_classify_context()
|
c = self.__merge_classify_context()
|
||||||
|
|
||||||
if self.Format in [1, 2]:
|
if self.Format in [1, 2]:
|
||||||
for rs in getattr(self, c.RuleSet):
|
for rs in getattr(self, c.RuleSet):
|
||||||
if not rs: continue
|
if not rs:
|
||||||
|
continue
|
||||||
for r in getattr(rs, c.Rule):
|
for r in getattr(rs, c.Rule):
|
||||||
if not r: continue
|
if not r:
|
||||||
|
continue
|
||||||
for ll in getattr(r, c.LookupRecord):
|
for ll in getattr(r, c.LookupRecord):
|
||||||
if not ll: continue
|
if not ll:
|
||||||
|
continue
|
||||||
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
|
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
|
||||||
elif self.Format == 3:
|
elif self.Format == 3:
|
||||||
for ll in getattr(self, c.LookupRecord):
|
for ll in getattr(self, c.LookupRecord):
|
||||||
if not ll: continue
|
if not ll:
|
||||||
|
continue
|
||||||
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
|
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
|
||||||
else:
|
else:
|
||||||
assert 0, "unknown format: %s" % self.Format
|
assert 0, "unknown format: %s" % self.Format
|
||||||
|
|
||||||
@add_method(otTables.ExtensionSubst,
|
|
||||||
otTables.ExtensionPos)
|
@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
if self.Format == 1:
|
if self.Format == 1:
|
||||||
self.ExtSubTable.mapLookups(lookupMap)
|
self.ExtSubTable.mapLookups(lookupMap)
|
||||||
else:
|
else:
|
||||||
assert 0, "unknown format: %s" % self.Format
|
assert 0, "unknown format: %s" % self.Format
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.Lookup)
|
@add_method(otTables.Lookup)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
for st in self.SubTable:
|
for st in self.SubTable:
|
||||||
if not st: continue
|
if not st:
|
||||||
|
continue
|
||||||
st.mapLookups(lookupMap)
|
st.mapLookups(lookupMap)
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.LookupList)
|
@add_method(otTables.LookupList)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
for l in self.Lookup:
|
for l in self.Lookup:
|
||||||
if not l: continue
|
if not l:
|
||||||
|
continue
|
||||||
l.mapLookups(lookupMap)
|
l.mapLookups(lookupMap)
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.Lookup)
|
@add_method(otTables.Lookup)
|
||||||
def mapMarkFilteringSets(self, markFilteringSetMap):
|
def mapMarkFilteringSets(self, markFilteringSetMap):
|
||||||
if self.LookupFlag & 0x0010:
|
if self.LookupFlag & 0x0010:
|
||||||
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
|
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.LookupList)
|
@add_method(otTables.LookupList)
|
||||||
def mapMarkFilteringSets(self, markFilteringSetMap):
|
def mapMarkFilteringSets(self, markFilteringSetMap):
|
||||||
for l in self.Lookup:
|
for l in self.Lookup:
|
||||||
if not l: continue
|
if not l:
|
||||||
|
continue
|
||||||
l.mapMarkFilteringSets(markFilteringSetMap)
|
l.mapMarkFilteringSets(markFilteringSetMap)
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.Feature)
|
@add_method(otTables.Feature)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
|
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.FeatureList)
|
@add_method(otTables.FeatureList)
|
||||||
def mapLookups(self, lookupMap):
|
def mapLookups(self, lookupMap):
|
||||||
for f in self.FeatureRecord:
|
for f in self.FeatureRecord:
|
||||||
if not f or not f.Feature: continue
|
if not f or not f.Feature:
|
||||||
|
continue
|
||||||
f.Feature.mapLookups(lookupMap)
|
f.Feature.mapLookups(lookupMap)
|
||||||
|
|
||||||
@add_method(otTables.DefaultLangSys,
|
|
||||||
otTables.LangSys)
|
@add_method(otTables.DefaultLangSys, otTables.LangSys)
|
||||||
def mapFeatures(self, featureMap):
|
def mapFeatures(self, featureMap):
|
||||||
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
|
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
|
||||||
if self.ReqFeatureIndex != 65535:
|
if self.ReqFeatureIndex != 65535:
|
||||||
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
|
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.Script)
|
@add_method(otTables.Script)
|
||||||
def mapFeatures(self, featureMap):
|
def mapFeatures(self, featureMap):
|
||||||
if self.DefaultLangSys:
|
if self.DefaultLangSys:
|
||||||
self.DefaultLangSys.mapFeatures(featureMap)
|
self.DefaultLangSys.mapFeatures(featureMap)
|
||||||
for l in self.LangSysRecord:
|
for l in self.LangSysRecord:
|
||||||
if not l or not l.LangSys: continue
|
if not l or not l.LangSys:
|
||||||
|
continue
|
||||||
l.LangSys.mapFeatures(featureMap)
|
l.LangSys.mapFeatures(featureMap)
|
||||||
|
|
||||||
|
|
||||||
@add_method(otTables.ScriptList)
|
@add_method(otTables.ScriptList)
|
||||||
def mapFeatures(self, featureMap):
|
def mapFeatures(self, featureMap):
|
||||||
for s in self.ScriptRecord:
|
for s in self.ScriptRecord:
|
||||||
if not s or not s.Script: continue
|
if not s or not s.Script:
|
||||||
|
continue
|
||||||
s.Script.mapFeatures(featureMap)
|
s.Script.mapFeatures(featureMap)
|
||||||
|
|
||||||
|
|
||||||
def layoutPreMerge(font):
|
def layoutPreMerge(font):
|
||||||
# Map indices to references
|
# Map indices to references
|
||||||
|
|
||||||
GDEF = font.get('GDEF')
|
GDEF = font.get("GDEF")
|
||||||
GSUB = font.get('GSUB')
|
GSUB = font.get("GSUB")
|
||||||
GPOS = font.get('GPOS')
|
GPOS = font.get("GPOS")
|
||||||
|
|
||||||
for t in [GSUB, GPOS]:
|
for t in [GSUB, GPOS]:
|
||||||
if not t: continue
|
if not t:
|
||||||
|
continue
|
||||||
|
|
||||||
if t.table.LookupList:
|
if t.table.LookupList:
|
||||||
lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
|
lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
|
||||||
@ -397,7 +446,9 @@ def layoutPreMerge(font):
|
|||||||
t.table.FeatureList.mapLookups(lookupMap)
|
t.table.FeatureList.mapLookups(lookupMap)
|
||||||
|
|
||||||
if GDEF and GDEF.table.Version >= 0x00010002:
|
if GDEF and GDEF.table.Version >= 0x00010002:
|
||||||
markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
|
markFilteringSetMap = {
|
||||||
|
i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
|
||||||
|
}
|
||||||
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
|
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
|
||||||
|
|
||||||
if t.table.FeatureList and t.table.ScriptList:
|
if t.table.FeatureList and t.table.ScriptList:
|
||||||
@ -406,15 +457,17 @@ def layoutPreMerge(font):
|
|||||||
|
|
||||||
# TODO FeatureParams nameIDs
|
# TODO FeatureParams nameIDs
|
||||||
|
|
||||||
|
|
||||||
def layoutPostMerge(font):
|
def layoutPostMerge(font):
|
||||||
# Map references back to indices
|
# Map references back to indices
|
||||||
|
|
||||||
GDEF = font.get('GDEF')
|
GDEF = font.get("GDEF")
|
||||||
GSUB = font.get('GSUB')
|
GSUB = font.get("GSUB")
|
||||||
GPOS = font.get('GPOS')
|
GPOS = font.get("GPOS")
|
||||||
|
|
||||||
for t in [GSUB, GPOS]:
|
for t in [GSUB, GPOS]:
|
||||||
if not t: continue
|
if not t:
|
||||||
|
continue
|
||||||
|
|
||||||
if t.table.FeatureList and t.table.ScriptList:
|
if t.table.FeatureList and t.table.ScriptList:
|
||||||
|
|
||||||
@ -423,12 +476,18 @@ def layoutPostMerge(font):
|
|||||||
t.table.ScriptList.mapFeatures(featureMap)
|
t.table.ScriptList.mapFeatures(featureMap)
|
||||||
|
|
||||||
# Record used features.
|
# Record used features.
|
||||||
featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
|
featureMap = AttendanceRecordingIdentityDict(
|
||||||
|
t.table.FeatureList.FeatureRecord
|
||||||
|
)
|
||||||
t.table.ScriptList.mapFeatures(featureMap)
|
t.table.ScriptList.mapFeatures(featureMap)
|
||||||
usedIndices = featureMap.s
|
usedIndices = featureMap.s
|
||||||
|
|
||||||
# Remove unused features
|
# Remove unused features
|
||||||
t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
|
t.table.FeatureList.FeatureRecord = [
|
||||||
|
f
|
||||||
|
for i, f in enumerate(t.table.FeatureList.FeatureRecord)
|
||||||
|
if i in usedIndices
|
||||||
|
]
|
||||||
|
|
||||||
# Map back to indices.
|
# Map back to indices.
|
||||||
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
|
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
|
||||||
@ -450,7 +509,9 @@ def layoutPostMerge(font):
|
|||||||
usedIndices = lookupMap.s
|
usedIndices = lookupMap.s
|
||||||
|
|
||||||
# Remove unused lookups
|
# Remove unused lookups
|
||||||
t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
|
t.table.LookupList.Lookup = [
|
||||||
|
l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
|
||||||
|
]
|
||||||
|
|
||||||
# Map back to indices.
|
# Map back to indices.
|
||||||
lookupMap = NonhashableDict(t.table.LookupList.Lookup)
|
lookupMap = NonhashableDict(t.table.LookupList.Lookup)
|
||||||
@ -460,7 +521,9 @@ def layoutPostMerge(font):
|
|||||||
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
|
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
|
||||||
|
|
||||||
if GDEF and GDEF.table.Version >= 0x00010002:
|
if GDEF and GDEF.table.Version >= 0x00010002:
|
||||||
markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
|
markFilteringSetMap = NonhashableDict(
|
||||||
|
GDEF.table.MarkGlyphSetsDef.Coverage
|
||||||
|
)
|
||||||
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
|
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
|
||||||
|
|
||||||
# TODO FeatureParams nameIDs
|
# TODO FeatureParams nameIDs
|
||||||
|
@ -4,7 +4,6 @@
|
|||||||
|
|
||||||
|
|
||||||
class Options(object):
|
class Options(object):
|
||||||
|
|
||||||
class UnknownOptionError(Exception):
|
class UnknownOptionError(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -27,12 +26,12 @@ class Options(object):
|
|||||||
opts = {}
|
opts = {}
|
||||||
for a in argv:
|
for a in argv:
|
||||||
orig_a = a
|
orig_a = a
|
||||||
if not a.startswith('--'):
|
if not a.startswith("--"):
|
||||||
ret.append(a)
|
ret.append(a)
|
||||||
continue
|
continue
|
||||||
a = a[2:]
|
a = a[2:]
|
||||||
i = a.find('=')
|
i = a.find("=")
|
||||||
op = '='
|
op = "="
|
||||||
if i == -1:
|
if i == -1:
|
||||||
if a.startswith("no-"):
|
if a.startswith("no-"):
|
||||||
k = a[3:]
|
k = a[3:]
|
||||||
@ -43,11 +42,11 @@ class Options(object):
|
|||||||
else:
|
else:
|
||||||
k = a[:i]
|
k = a[:i]
|
||||||
if k[-1] in "-+":
|
if k[-1] in "-+":
|
||||||
op = k[-1]+'=' # Ops is '-=' or '+=' now.
|
op = k[-1] + "=" # Ops is '-=' or '+=' now.
|
||||||
k = k[:-1]
|
k = k[:-1]
|
||||||
v = a[i + 1 :]
|
v = a[i + 1 :]
|
||||||
ok = k
|
ok = k
|
||||||
k = k.replace('-', '_')
|
k = k.replace("-", "_")
|
||||||
if not hasattr(self, k):
|
if not hasattr(self, k):
|
||||||
if ignore_unknown is True or ok in ignore_unknown:
|
if ignore_unknown is True or ok in ignore_unknown:
|
||||||
ret.append(orig_a)
|
ret.append(orig_a)
|
||||||
@ -61,16 +60,16 @@ class Options(object):
|
|||||||
elif isinstance(ov, int):
|
elif isinstance(ov, int):
|
||||||
v = int(v)
|
v = int(v)
|
||||||
elif isinstance(ov, list):
|
elif isinstance(ov, list):
|
||||||
vv = v.split(',')
|
vv = v.split(",")
|
||||||
if vv == ['']:
|
if vv == [""]:
|
||||||
vv = []
|
vv = []
|
||||||
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
|
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
|
||||||
if op == '=':
|
if op == "=":
|
||||||
v = vv
|
v = vv
|
||||||
elif op == '+=':
|
elif op == "+=":
|
||||||
v = ov
|
v = ov
|
||||||
v.extend(vv)
|
v.extend(vv)
|
||||||
elif op == '-=':
|
elif op == "-=":
|
||||||
v = ov
|
v = ov
|
||||||
for x in vv:
|
for x in vv:
|
||||||
if x in v:
|
if x in v:
|
||||||
@ -82,4 +81,3 @@ class Options(object):
|
|||||||
self.set(**opts)
|
self.set(**opts)
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
@ -13,21 +13,21 @@ import logging
|
|||||||
log = logging.getLogger("fontTools.merge")
|
log = logging.getLogger("fontTools.merge")
|
||||||
|
|
||||||
|
|
||||||
ttLib.getTableClass('maxp').mergeMap = {
|
ttLib.getTableClass("maxp").mergeMap = {
|
||||||
'*': max,
|
"*": max,
|
||||||
'tableTag': equal,
|
"tableTag": equal,
|
||||||
'tableVersion': equal,
|
"tableVersion": equal,
|
||||||
'numGlyphs': sum,
|
"numGlyphs": sum,
|
||||||
'maxStorage': first,
|
"maxStorage": first,
|
||||||
'maxFunctionDefs': first,
|
"maxFunctionDefs": first,
|
||||||
'maxInstructionDefs': first,
|
"maxInstructionDefs": first,
|
||||||
# TODO When we correctly merge hinting data, update these values:
|
# TODO When we correctly merge hinting data, update these values:
|
||||||
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
|
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
|
||||||
}
|
}
|
||||||
|
|
||||||
headFlagsMergeBitMap = {
|
headFlagsMergeBitMap = {
|
||||||
'size': 16,
|
"size": 16,
|
||||||
'*': bitwise_or,
|
"*": bitwise_or,
|
||||||
1: bitwise_and, # Baseline at y = 0
|
1: bitwise_and, # Baseline at y = 0
|
||||||
2: bitwise_and, # lsb at x = 0
|
2: bitwise_and, # lsb at x = 0
|
||||||
3: bitwise_and, # Force ppem to integer values. FIXME?
|
3: bitwise_and, # Force ppem to integer values. FIXME?
|
||||||
@ -39,64 +39,64 @@ headFlagsMergeBitMap = {
|
|||||||
15: lambda bit: 0, # Always set to zero
|
15: lambda bit: 0, # Always set to zero
|
||||||
}
|
}
|
||||||
|
|
||||||
ttLib.getTableClass('head').mergeMap = {
|
ttLib.getTableClass("head").mergeMap = {
|
||||||
'tableTag': equal,
|
"tableTag": equal,
|
||||||
'tableVersion': max,
|
"tableVersion": max,
|
||||||
'fontRevision': max,
|
"fontRevision": max,
|
||||||
'checkSumAdjustment': lambda lst: 0, # We need *something* here
|
"checkSumAdjustment": lambda lst: 0, # We need *something* here
|
||||||
'magicNumber': equal,
|
"magicNumber": equal,
|
||||||
'flags': mergeBits(headFlagsMergeBitMap),
|
"flags": mergeBits(headFlagsMergeBitMap),
|
||||||
'unitsPerEm': equal,
|
"unitsPerEm": equal,
|
||||||
'created': current_time,
|
"created": current_time,
|
||||||
'modified': current_time,
|
"modified": current_time,
|
||||||
'xMin': min,
|
"xMin": min,
|
||||||
'yMin': min,
|
"yMin": min,
|
||||||
'xMax': max,
|
"xMax": max,
|
||||||
'yMax': max,
|
"yMax": max,
|
||||||
'macStyle': first,
|
"macStyle": first,
|
||||||
'lowestRecPPEM': max,
|
"lowestRecPPEM": max,
|
||||||
'fontDirectionHint': lambda lst: 2,
|
"fontDirectionHint": lambda lst: 2,
|
||||||
'indexToLocFormat': first,
|
"indexToLocFormat": first,
|
||||||
'glyphDataFormat': equal,
|
"glyphDataFormat": equal,
|
||||||
}
|
}
|
||||||
|
|
||||||
ttLib.getTableClass('hhea').mergeMap = {
|
ttLib.getTableClass("hhea").mergeMap = {
|
||||||
'*': equal,
|
"*": equal,
|
||||||
'tableTag': equal,
|
"tableTag": equal,
|
||||||
'tableVersion': max,
|
"tableVersion": max,
|
||||||
'ascent': max,
|
"ascent": max,
|
||||||
'descent': min,
|
"descent": min,
|
||||||
'lineGap': max,
|
"lineGap": max,
|
||||||
'advanceWidthMax': max,
|
"advanceWidthMax": max,
|
||||||
'minLeftSideBearing': min,
|
"minLeftSideBearing": min,
|
||||||
'minRightSideBearing': min,
|
"minRightSideBearing": min,
|
||||||
'xMaxExtent': max,
|
"xMaxExtent": max,
|
||||||
'caretSlopeRise': first,
|
"caretSlopeRise": first,
|
||||||
'caretSlopeRun': first,
|
"caretSlopeRun": first,
|
||||||
'caretOffset': first,
|
"caretOffset": first,
|
||||||
'numberOfHMetrics': recalculate,
|
"numberOfHMetrics": recalculate,
|
||||||
}
|
}
|
||||||
|
|
||||||
ttLib.getTableClass('vhea').mergeMap = {
|
ttLib.getTableClass("vhea").mergeMap = {
|
||||||
'*': equal,
|
"*": equal,
|
||||||
'tableTag': equal,
|
"tableTag": equal,
|
||||||
'tableVersion': max,
|
"tableVersion": max,
|
||||||
'ascent': max,
|
"ascent": max,
|
||||||
'descent': min,
|
"descent": min,
|
||||||
'lineGap': max,
|
"lineGap": max,
|
||||||
'advanceHeightMax': max,
|
"advanceHeightMax": max,
|
||||||
'minTopSideBearing': min,
|
"minTopSideBearing": min,
|
||||||
'minBottomSideBearing': min,
|
"minBottomSideBearing": min,
|
||||||
'yMaxExtent': max,
|
"yMaxExtent": max,
|
||||||
'caretSlopeRise': first,
|
"caretSlopeRise": first,
|
||||||
'caretSlopeRun': first,
|
"caretSlopeRun": first,
|
||||||
'caretOffset': first,
|
"caretOffset": first,
|
||||||
'numberOfVMetrics': recalculate,
|
"numberOfVMetrics": recalculate,
|
||||||
}
|
}
|
||||||
|
|
||||||
os2FsTypeMergeBitMap = {
|
os2FsTypeMergeBitMap = {
|
||||||
'size': 16,
|
"size": 16,
|
||||||
'*': lambda bit: 0,
|
"*": lambda bit: 0,
|
||||||
1: bitwise_or, # no embedding permitted
|
1: bitwise_or, # no embedding permitted
|
||||||
2: bitwise_and, # allow previewing and printing documents
|
2: bitwise_and, # allow previewing and printing documents
|
||||||
3: bitwise_and, # allow editing documents
|
3: bitwise_and, # allow editing documents
|
||||||
@ -104,6 +104,7 @@ os2FsTypeMergeBitMap = {
|
|||||||
9: bitwise_or, # no embedding of outlines permitted
|
9: bitwise_or, # no embedding of outlines permitted
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def mergeOs2FsType(lst):
|
def mergeOs2FsType(lst):
|
||||||
lst = list(lst)
|
lst = list(lst)
|
||||||
if all(item == 0 for item in lst):
|
if all(item == 0 for item in lst):
|
||||||
@ -128,39 +129,40 @@ def mergeOs2FsType(lst):
|
|||||||
return fsType
|
return fsType
|
||||||
|
|
||||||
|
|
||||||
ttLib.getTableClass('OS/2').mergeMap = {
|
ttLib.getTableClass("OS/2").mergeMap = {
|
    "*": first,
    "tableTag": equal,
    "version": max,
    "xAvgCharWidth": first,  # Will be recalculated at the end on the merged font
    "fsType": mergeOs2FsType,  # Will be overwritten
    "panose": first,  # FIXME: should really be the first Latin font
    "ulUnicodeRange1": bitwise_or,
    "ulUnicodeRange2": bitwise_or,
    "ulUnicodeRange3": bitwise_or,
    "ulUnicodeRange4": bitwise_or,
    "fsFirstCharIndex": min,
    "fsLastCharIndex": max,
    "sTypoAscender": max,
    "sTypoDescender": min,
    "sTypoLineGap": max,
    "usWinAscent": max,
    "usWinDescent": max,
    # Version 1
    "ulCodePageRange1": onlyExisting(bitwise_or),
    "ulCodePageRange2": onlyExisting(bitwise_or),
    # Version 2, 3, 4
    "sxHeight": onlyExisting(max),
    "sCapHeight": onlyExisting(max),
    "usDefaultChar": onlyExisting(first),
    "usBreakChar": onlyExisting(first),
    "usMaxContext": onlyExisting(max),
    # version 5
    "usLowerOpticalPointSize": onlyExisting(min),
    "usUpperOpticalPointSize": onlyExisting(max),
}


@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
    DefaultTable.merge(self, m, tables)
    if self.version < 2:

@@ -174,41 +176,43 @@ def merge(self, m, tables):
        self.fsType &= ~0x0004
    return self


ttLib.getTableClass("post").mergeMap = {
    "*": first,
    "tableTag": equal,
    "formatType": max,
    "isFixedPitch": min,
    "minMemType42": max,
    "maxMemType42": lambda lst: 0,
    "minMemType1": max,
    "maxMemType1": lambda lst: 0,
    "mapping": onlyExisting(sumDicts),
    "extraNames": lambda lst: [],
}

ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
    "tableTag": equal,
    "metrics": sumDicts,
}

ttLib.getTableClass("name").mergeMap = {
    "tableTag": equal,
    "names": first,  # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass("loca").mergeMap = {
    "*": recalculate,
    "tableTag": equal,
}

ttLib.getTableClass("glyf").mergeMap = {
    "tableTag": equal,
    "glyphs": sumDicts,
    "glyphOrder": sumLists,
}


@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
    for i, table in enumerate(tables):
        for g in table.glyphs.values():

@@ -222,18 +226,20 @@ def merge(self, m, tables):
                g.expand(table)
    return DefaultTable.merge(self, m, tables)


ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
    lst
)  # FIXME? Appears irreconcilable


@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):

    if any(hasattr(table, "FDSelect") for table in tables):
        raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")

    for table in tables:
        table.cff.desubroutinize()

@@ -279,17 +285,18 @@ def merge(self, m, tables):

    return newcff


@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):

    # TODO Handle format=14.
    if not hasattr(m, "cmap"):
        computeMegaCmap(m, tables)
    cmap = m.cmap

    cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule("cmap")
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
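For reference, a rough sketch of how a mergeMap like the one above can be applied. The helper below is purely illustrative and hypothetical (the real merger dispatches through DefaultTable.merge); merge_attrs and attr_names are names invented for the example.

def merge_attrs(mergeMap, tables, attr_names):
    # For each attribute, look up its merge logic (falling back to "*")
    # and reduce the per-font values with it.
    merged = {}
    for name in attr_names:
        logic = mergeMap.get(name, mergeMap["*"])
        merged[name] = logic([getattr(table, name) for table in tables])
    return merged

# e.g. usWinAscent is reduced with max, fsFirstCharIndex with min,
# and the ulUnicodeRange fields are OR-ed together.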

@@ -1,5 +1,6 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.


def is_Default_Ignorable(u):
    # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
    #

@@ -35,31 +36,43 @@ def is_Default_Ignorable(u):
    # E0100..E01EF  # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
    # E01F0..E0FFF  # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
    return (
        u == 0x00AD
        or u == 0x034F  # Cf SOFT HYPHEN
        or u == 0x061C  # Mn COMBINING GRAPHEME JOINER
        or 0x115F <= u <= 0x1160  # Cf ARABIC LETTER MARK
        or 0x17B4  # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
        <= u
        <= 0x17B5
        or 0x180B  # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
        <= u
        <= 0x180D
        or u  # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
        == 0x180E
        or u == 0x180F  # Cf MONGOLIAN VOWEL SEPARATOR
        or 0x200B <= u <= 0x200F  # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
        or 0x202A <= u <= 0x202E  # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
        or 0x2060  # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
        <= u
        <= 0x2064
        or u == 0x2065  # Cf [5] WORD JOINER..INVISIBLE PLUS
        or 0x2066 <= u <= 0x206F  # Cn <reserved-2065>
        or u == 0x3164  # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
        or 0xFE00 <= u <= 0xFE0F  # Lo HANGUL FILLER
        or u == 0xFEFF  # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
        or u == 0xFFA0  # Cf ZERO WIDTH NO-BREAK SPACE
        or 0xFFF0 <= u <= 0xFFF8  # Lo HALFWIDTH HANGUL FILLER
        or 0x1BCA0 <= u <= 0x1BCA3  # Cn [9] <reserved-FFF0>..<reserved-FFF8>
        or 0x1D173  # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
        <= u
        <= 0x1D17A
        or u == 0xE0000  # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
        or u == 0xE0001  # Cn <reserved-E0000>
        or 0xE0002 <= u <= 0xE001F  # Cf LANGUAGE TAG
        or 0xE0020 <= u <= 0xE007F  # Cn [30] <reserved-E0002>..<reserved-E001F>
        or 0xE0080 <= u <= 0xE00FF  # Cf [96] TAG SPACE..CANCEL TAG
        or 0xE0100 <= u <= 0xE01EF  # Cn [128] <reserved-E0080>..<reserved-E00FF>
        or 0xE01F0  # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
        <= u
        <= 0xE0FFF
        or False  # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
    )
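A few illustrative calls, based on the ranges listed above (not doctests taken from the module):

>>> is_Default_Ignorable(0x00AD)   # SOFT HYPHEN
True
>>> is_Default_Ignorable(0xFE0F)   # VARIATION SELECTOR-16
True
>>> is_Default_Ignorable(0x0041)   # LATIN CAPITAL LETTER A
False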

@@ -14,6 +14,7 @@ log = logging.getLogger("fontTools.merge")

# General utility functions for merging values from different fonts


def equal(lst):
    lst = list(lst)
    t = iter(lst)

@@ -21,25 +22,32 @@ def equal(lst):
    assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
    return first


def first(lst):
    return next(iter(lst))


def recalculate(lst):
    return NotImplemented


def current_time(lst):
    return timestampNow()


def bitwise_and(lst):
    return reduce(operator.and_, lst)


def bitwise_or(lst):
    return reduce(operator.or_, lst)


def avg_int(lst):
    lst = list(lst)
    return sum(lst) // len(lst)


def onlyExisting(func):
    """Returns a filter func that when called with a list,
    only calls func on the non-NotImplemented items of the list,

@@ -52,29 +60,31 @@ def onlyExisting(func):

    return wrapper


def sumLists(lst):
    l = []
    for item in lst:
        l.extend(item)
    return l


def sumDicts(lst):
    d = {}
    for item in lst:
        d.update(item)
    return d


def mergeBits(bitmap):
    def wrapper(lst):
        lst = list(lst)
        returnValue = 0
        for bitNumber in range(bitmap["size"]):
            try:
                mergeLogic = bitmap[bitNumber]
            except KeyError:
                try:
                    mergeLogic = bitmap["*"]
                except KeyError:
                    raise Exception("Don't know how to merge bit %s" % bitNumber)
            shiftedBit = 1 << bitNumber

@@ -98,6 +108,7 @@ class AttendanceRecordingIdentityDict(object):
        self.s.add(self.d[id(v)])
        return v


class GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list."""

@@ -112,6 +123,7 @@ class GregariousIdentityDict(object):
        self.l.append(v)
        return v


class NonhashableDict(object):
    """A dictionary-like object mapping objects to values."""
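Minimal examples of the merge helpers above, shown here for illustration only:

>>> equal([3, 3, 3])
3
>>> first("abc")
'a'
>>> sumDicts([{1: "a"}, {2: "b"}])
{1: 'a', 2: 'b'}
>>> onlyExisting(max)([2, NotImplemented, 7])   # NotImplemented entries are skipped
7

mergeBits works per bit in the same spirit: it takes a bitmap spec with a "size" and per-bit (or "*") merge logic and reduces each bit of the input flag values with that logic.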

@@ -23,6 +23,7 @@ def calcBounds(array):
    ys = [y for x, y in array]
    return min(xs), min(ys), max(xs), max(ys)


def calcIntBounds(array, round=otRound):
    """Calculate the integer bounding rectangle of a 2D points array.

@@ -57,6 +58,7 @@ def updateBounds(bounds, p, min=min, max=max):
    xMin, yMin, xMax, yMax = bounds
    return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)


def pointInRect(p, rect):
    """Test if a point is inside a bounding rectangle.

@@ -72,6 +74,7 @@ def pointInRect(p, rect):
    xMin, yMin, xMax, yMax = rect
    return (xMin <= x <= xMax) and (yMin <= y <= yMax)


def pointsInRect(array, rect):
    """Determine which points are inside a bounding rectangle.

@@ -88,6 +91,7 @@ def pointsInRect(array, rect):
    xMin, yMin, xMax, yMax = rect
    return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]


def vectorLength(vector):
    """Calculate the length of the given vector.

@@ -100,6 +104,7 @@ def vectorLength(vector):
    x, y = vector
    return math.sqrt(x**2 + y**2)


def asInt16(array):
    """Round a list of floats to 16-bit signed integers.

@@ -130,6 +135,7 @@ def normRect(rect):
    (xMin, yMin, xMax, yMax) = rect
    return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)


def scaleRect(rect, x, y):
    """Scale a bounding box rectangle.

@@ -145,6 +151,7 @@ def scaleRect(rect, x, y):
    (xMin, yMin, xMax, yMax) = rect
    return xMin * x, yMin * y, xMax * x, yMax * y


def offsetRect(rect, dx, dy):
    """Offset a bounding box rectangle.

@@ -160,6 +167,7 @@ def offsetRect(rect, dx, dy):
    (xMin, yMin, xMax, yMax) = rect
    return xMin + dx, yMin + dy, xMax + dx, yMax + dy


def insetRect(rect, dx, dy):
    """Inset a bounding box rectangle on all sides.

@@ -175,6 +183,7 @@ def insetRect(rect, dx, dy):
    (xMin, yMin, xMax, yMax) = rect
    return xMin + dx, yMin + dy, xMax - dx, yMax - dy


def sectRect(rect1, rect2):
    """Test for rectangle-rectangle intersection.

@@ -191,12 +200,17 @@ def sectRect(rect1, rect2):
    """
    (xMin1, yMin1, xMax1, yMax1) = rect1
    (xMin2, yMin2, xMax2, yMax2) = rect2
    xMin, yMin, xMax, yMax = (
        max(xMin1, xMin2),
        max(yMin1, yMin2),
        min(xMax1, xMax2),
        min(yMax1, yMax2),
    )
    if xMin >= xMax or yMin >= yMax:
        return False, (0, 0, 0, 0)
    return True, (xMin, yMin, xMax, yMax)


def unionRect(rect1, rect2):
    """Determine union of bounding rectangles.

@@ -211,10 +225,15 @@ def unionRect(rect1, rect2):
    """
    (xMin1, yMin1, xMax1, yMax1) = rect1
    (xMin2, yMin2, xMax2, yMax2) = rect2
    xMin, yMin, xMax, yMax = (
        min(xMin1, xMin2),
        min(yMin1, yMin2),
        max(xMax1, xMax2),
        max(yMax1, yMax2),
    )
    return (xMin, yMin, xMax, yMax)


def rectCenter(rect):
    """Determine rectangle center.

@@ -228,6 +247,7 @@ def rectCenter(rect):
    (xMin, yMin, xMax, yMax) = rect
    return (xMin + xMax) / 2, (yMin + yMax) / 2


def rectArea(rect):
    """Determine rectangle area.

@@ -241,6 +261,7 @@ def rectArea(rect):
    (xMin, yMin, xMax, yMax) = rect
    return (yMax - yMin) * (xMax - xMin)


def intRect(rect):
    """Round a rectangle to integer values.

@@ -262,7 +283,6 @@ def intRect(rect):


class Vector(_Vector):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "fontTools.misc.arrayTools.Vector has been deprecated, please use "

@@ -373,7 +393,9 @@ def _test():
    (0, 2, 4, 5)
    """


if __name__ == "__main__":
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
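A couple of worked examples for the rectangle helpers above:

>>> unionRect((0, 10, 20, 30), (0, 40, 20, 50))
(0, 10, 20, 50)
>>> sectRect((0, 10, 20, 30), (0, 40, 20, 50))
(False, (0, 0, 0, 0))
>>> offsetRect((10, 20, 30, 40), 5, -5)
(15, 15, 35, 35)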

@@ -168,4 +168,5 @@ def classify(list_of_sets, sort=True):

if __name__ == "__main__":
    import sys, doctest

    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

@@ -6,7 +6,9 @@ import re
numberAddedRE = re.compile(r"#\d+$")


def makeOutputFileName(
    input, outputDir=None, extension=None, overWrite=False, suffix=""
):
    """Generates a suitable file name for writing output.

    Often tools will want to take a file, do some kind of transformation to it,

@@ -44,6 +46,7 @@ def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, s
    if not overWrite:
        while os.path.exists(output):
            output = os.path.join(
                dirName, fileName + suffix + "#" + repr(n) + extension
            )
            n += 1
    return output
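A small usage sketch of makeOutputFileName, assuming the target path does not already exist; when it does, the while-loop above appends "#1", "#2", ... before the extension:

>>> makeOutputFileName("/tmp/MyFont.ttf", extension=".woff2")
'/tmp/MyFont.woff2'
>>> # if /tmp/MyFont.woff2 already existed, the next free name would be
>>> # '/tmp/MyFont#1.woff2'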

@@ -10,9 +10,11 @@ We only define the symbols that we use. E.g. see fontTools.cu2qu

from types import SimpleNamespace


def _empty_decorator(x):
    return x


compiled = False

for name in ("double", "complex", "int"):

@@ -1,7 +1,7 @@
"""Misc dict tools."""


__all__ = ["hashdict"]

# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):

@@ -26,36 +26,54 @@ class hashdict(dict):
    http://stackoverflow.com/questions/1151658/python-hashable-dicts

    """

    def __key(self):
        return tuple(sorted(self.items()))

    def __repr__(self):
        return "{0}({1})".format(
            self.__class__.__name__,
            ", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),
        )

    def __hash__(self):
        return hash(self.__key())

    def __setitem__(self, key, value):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def __delitem__(self, key):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def clear(self):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def pop(self, *args, **kwargs):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def popitem(self, *args, **kwargs):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def setdefault(self, *args, **kwargs):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    def update(self, *args, **kwargs):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it

@@ -63,4 +81,3 @@ class hashdict(dict):
        result = hashdict(self)
        dict.update(result, right)
        return result
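hashdict in use: it hashes by its sorted items, so equal contents give equal keys, while any mutation raises TypeError. A brief sketch:

>>> h = hashdict(tableTag="GSUB", version=1)
>>> cache = {h: "compiled"}
>>> cache[hashdict(version=1, tableTag="GSUB")]
'compiled'
>>> h["version"] = 2   # raises TypeError: hashdict does not support item assignment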

@@ -21,6 +21,7 @@ def _decryptChar(cipher, R):
    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
    return bytechr(plain), R


def _encryptChar(plain, R):
    plain = byteord(plain)
    cipher = ((plain ^ (R >> 8))) & 0xFF

@@ -56,6 +57,7 @@ def decrypt(cipherstring, R):
    plainstring = bytesjoin(plainList)
    return plainstring, int(R)


def encrypt(plainstring, R):
    r"""
    Encrypts a string using the Type 1 encryption algorithm.

@@ -99,10 +101,13 @@ def encrypt(plainstring, R):


def hexString(s):
    import binascii

    return binascii.hexlify(s)


def deHexString(h):
    import binascii

    h = bytesjoin(h.split())
    return binascii.unhexlify(h)


@@ -110,4 +115,5 @@ def deHexString(h):
if __name__ == "__main__":
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
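The encrypt/decrypt pair round-trips with the same R; 55665 is the conventional eexec key for Type 1 fonts. A short sketch:

>>> cipher, _ = encrypt(b"hello world", 55665)
>>> decrypt(cipher, 55665)[0]
b'hello world'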

@@ -6,13 +6,13 @@ import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = {
    0: {  # Unicode
        0: "utf_16_be",
        1: "utf_16_be",
        2: "utf_16_be",
        3: "utf_16_be",
        4: "utf_16_be",
        5: "utf_16_be",
        6: "utf_16_be",
    },
    1: {  # Macintosh
        # See

@@ -31,35 +31,36 @@ _encodingMap = {
            38: "mac_latin2",
            39: "mac_latin2",
            40: "mac_latin2",
            Ellipsis: "mac_roman",  # Other
        },
        1: "x_mac_japanese_ttx",
        2: "x_mac_trad_chinese_ttx",
        3: "x_mac_korean_ttx",
        6: "mac_greek",
        7: "mac_cyrillic",
        25: "x_mac_simp_chinese_ttx",
        29: "mac_latin2",
        35: "mac_turkish",
        37: "mac_iceland",
    },
    2: {  # ISO
        0: "ascii",
        1: "utf_16_be",
        2: "latin1",
    },
    3: {  # Microsoft
        0: "utf_16_be",
        1: "utf_16_be",
        2: "shift_jis",
        3: "gb2312",
        4: "big5",
        5: "euc_kr",
        6: "johab",
        10: "utf_16_be",
    },
}


def getEncoding(platformID, platEncID, langID, default=None):
    """Returns the Python encoding name for OpenType platformID/encodingID/langID
    triplet. If encoding for these values is not known, by default None is
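Example lookups against the table above (illustrative):

>>> getEncoding(3, 1, 0x409)                 # Microsoft, Unicode BMP
'utf_16_be'
>>> getEncoding(0, 3, None)                  # Unicode platform
'utf_16_be'
>>> getEncoding(7, 0, 0, default="ascii")    # unknown platform falls back to default
'ascii'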

@@ -244,7 +244,8 @@ except ImportError:
        except UnicodeDecodeError:
            raise ValueError(
                "Bytes strings can only contain ASCII characters. "
                "Use unicode strings for non-ASCII characters."
            )
        except AttributeError:
            _raise_serialization_error(s)
        if s and _invalid_xml_string.search(s):

@@ -425,9 +426,7 @@ except ImportError:
                write(_escape_cdata(elem.tail))

    def _raise_serialization_error(text):
        raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))

    def _escape_cdata(text):
        # escape character data

@@ -133,6 +133,7 @@ def userNameToFileName(userName, existing=[], prefix="", suffix=""):
    # finished
    return fullName


def handleClash1(userName, existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list

@@ -167,7 +168,7 @@ def handleClash1(userName, existing=[], prefix="", suffix=""):
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
        l = prefixLength + len(userName) + suffixLength + 15
        sliceLength = maxFileNameLength - l
        userName = userName[:sliceLength]
    finalName = None

@@ -189,6 +190,7 @@ def handleClash1(userName, existing=[], prefix="", suffix=""):
    # finished
    return finalName


def handleClash2(existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list

@@ -236,7 +238,9 @@ def handleClash2(existing=[], prefix="", suffix=""):
    # finished
    return finalName


if __name__ == "__main__":
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)

@@ -231,8 +231,10 @@ def ensureVersionIsLong(value):
    if value < 0x10000:
        newValue = floatToFixed(value, 16)
        log.warning(
            "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
            value,
            newValue,
        )
        value = newValue
    return value
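For illustration: a proper Version16Dot16 value passes through unchanged, while a float triggers the warning above and is converted with floatToFixed:

>>> ensureVersionIsLong(0x00010000)
65536
>>> ensureVersionIsLong(0.5)     # warns, then returns floatToFixed(0.5, 16)
32768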

@@ -54,9 +54,10 @@ class LevelFormatter(logging.Formatter):
    """

    def __init__(self, fmt=None, datefmt=None, style="%"):
        if style != "%":
            raise ValueError(
                "only '%' percent style is supported in both python 2 and 3"
            )
        if fmt is None:
            fmt = DEFAULT_FORMATS
        if isinstance(fmt, str):

@@ -66,7 +67,7 @@ class LevelFormatter(logging.Formatter):
            custom_formats = dict(fmt)
            default_format = custom_formats.pop("*", None)
        else:
            raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
        super(LevelFormatter, self).__init__(default_format, datefmt)
        self.default_format = self._fmt
        self.custom_formats = {}

@@ -133,15 +134,18 @@ def configLogger(**kwargs):
    handlers = kwargs.pop("handlers", None)
    if handlers is None:
        if "stream" in kwargs and "filename" in kwargs:
            raise ValueError(
                "'stream' and 'filename' should not be " "specified together"
            )
    else:
        if "stream" in kwargs or "filename" in kwargs:
            raise ValueError(
                "'stream' or 'filename' should not be "
                "specified together with 'handlers'"
            )
    if handlers is None:
        filename = kwargs.pop("filename", None)
        mode = kwargs.pop("filemode", "a")
        if filename:
            h = logging.FileHandler(filename, mode)
        else:

@@ -159,7 +163,7 @@ def configLogger(**kwargs):
    fs = kwargs.pop("format", None)
    dfs = kwargs.pop("datefmt", None)
    # XXX: '%' is the only format style supported on both py2 and 3
    style = kwargs.pop("style", "%")
    fmt = LevelFormatter(fs, dfs, style)
    filters = kwargs.pop("filters", [])
    for h in handlers:

@@ -177,8 +181,8 @@ def configLogger(**kwargs):
    if level is not None:
        logger.setLevel(level)
    if kwargs:
        keys = ", ".join(kwargs.keys())
        raise ValueError("Unrecognised argument(s): %s" % keys)


def _resetExistingLoggers(parent="root"):

@@ -287,10 +291,9 @@ class Timer(object):
    def __init__(self, logger=None, msg=None, level=None, start=None):
        self.reset(start)
        if logger is None:
            for arg in ("msg", "level"):
                if locals().get(arg) is not None:
                    raise ValueError("'%s' can't be specified without a 'logger'" % arg)
        self.logger = logger
        self.level = level if level is not None else TIME_LEVEL
        self.msg = msg

@@ -350,7 +353,7 @@ class Timer(object):
        message = self.formatTime(self.msg, time)
        # Allow log handlers to see the individual parts to facilitate things
        # like a server accumulating aggregate stats.
        msg_parts = {"msg": self.msg, "time": time}
        self.logger.log(self.level, message, msg_parts)

    def __call__(self, func_or_msg=None, **kwargs):

@@ -370,6 +373,7 @@ class Timer(object):
            def wrapper(*args, **kwds):
                with self:
                    return func(*args, **kwds)

            return wrapper
        else:
            msg = func_or_msg or kwargs.get("msg")

@@ -425,8 +429,7 @@ class ChannelsFilter(logging.Filter):
            nlen = self.lengths[name]
            if name == record.name:
                return True
            elif record.name.find(name, 0, nlen) == 0 and record.name[nlen] == ".":
                return True
        return False

@@ -465,6 +468,7 @@ class CapturingLogHandler(logging.Handler):

    def assertRegex(self, regexp, msg=None):
        import re

        pattern = re.compile(regexp)
        for r in self.records:
            if pattern.search(r.getMessage()):

@@ -505,32 +509,35 @@ class LogMixin(object):
    @property
    def log(self):
        if not hasattr(self, "_log"):
            name = ".".join((self.__class__.__module__, self.__class__.__name__))
            self._log = logging.getLogger(name)
        return self._log


def deprecateArgument(name, msg, category=UserWarning):
    """Raise a warning about deprecated function argument 'name'."""
    warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)


def deprecateFunction(msg, category=UserWarning):
    """Decorator to raise a warning when a deprecated function is called."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(
                "%r is deprecated; %s" % (func.__name__, msg),
                category=category,
                stacklevel=2,
            )
            return func(*args, **kwargs)

        return wrapper

    return decorator


if __name__ == "__main__":
    import doctest

    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
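A minimal usage sketch of the two main entry points above (logger name and message text are placeholders):

from fontTools.misc.loggingTools import configLogger, Timer
import logging

log = logging.getLogger("fontTools")
configLogger(level="INFO")            # installs a LevelFormatter on a default handler

with Timer(log, "compile glyf table"):
    pass                              # timed block; the elapsed time is logged on exit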

@@ -1,4 +1,5 @@
from fontTools.misc.textTools import Tag, bytesjoin, strjoin

try:
    import xattr
except ImportError:

@@ -24,7 +25,7 @@ def getMacCreatorAndType(path):
    """
    if xattr is not None:
        try:
            finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
        except (KeyError, IOError):
            pass
        else:

@@ -48,7 +49,8 @@ def setMacCreatorAndType(path, fileCreator, fileType):
    """
    if xattr is not None:
        from fontTools.misc.textTools import pad

        if not all(len(s) == 4 for s in (fileCreator, fileType)):
            raise TypeError("arg must be string of 4 chars")
        finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
        xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)

@@ -23,6 +23,7 @@ class ResourceReader(MutableMapping):
    representing all the resources of a certain type.

    """

    def __init__(self, fileOrPath):
        """Open a file

@@ -31,7 +32,7 @@ class ResourceReader(MutableMapping):
        ``os.PathLike`` object, or a string.
        """
        self._resources = OrderedDict()
        if hasattr(fileOrPath, "read"):
            self.file = fileOrPath
        else:
            try:

@@ -48,7 +49,7 @@ class ResourceReader(MutableMapping):
    def openResourceFork(path):
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + "/..namedfork/rsrc", "rb") as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path

@@ -56,7 +57,7 @@ class ResourceReader(MutableMapping):

    @staticmethod
    def openDataFork(path):
        with open(path, "rb") as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path

@@ -73,13 +74,13 @@ class ResourceReader(MutableMapping):
        except OverflowError:
            raise ResourceError("Failed to seek offset ('offset' is too large)")
        if self.file.tell() != offset:
            raise ResourceError("Failed to seek offset (reached EOF)")
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError("Cannot read resource (not enough data)")
        return data

    def _readHeaderAndMap(self):

@@ -96,15 +97,15 @@ class ResourceReader(MutableMapping):
    def _readTypeList(self):
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        (self.numTypes,) = struct.unpack(">H", numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item["type"], encoding="mac-roman")
            refListOffset = absTypeListOffset + item["refListOffset"]
            numRes = item["numRes"] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

@@ -174,7 +175,7 @@ class ResourceReader(MutableMapping):

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding="mac-roman")
        for res in self.get(resType, []):
            if res.name == name:
                return res

@@ -196,8 +197,9 @@ class Resource(object):
        attr: attributes.
    """

    def __init__(
        self, resType=None, resData=None, resID=None, resName=None, resAttr=None
    ):
        self.type = resType
        self.data = resData
        self.id = resID

@@ -207,16 +209,16 @@ class Resource(object):
    def decompile(self, refData, reader):
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
        (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding="mac-roman")


ResourceForkHeader = """
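A usage sketch for ResourceReader, assuming a file that actually carries resource-fork data; it behaves as a mapping from resource type to a list of Resource objects (file name below is hypothetical):

from fontTools.misc.macRes import ResourceReader

reader = ResourceReader("Fonts.suit")      # path, os.PathLike, or file object
print(list(reader.keys()))                 # e.g. ['FOND', 'sfnt']
for res in reader.get("sfnt", []):
    print(res.id, res.name, len(res.data))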
|
@ -353,7 +353,9 @@ def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
|
|||||||
return el
|
return el
|
||||||
|
|
||||||
|
|
||||||
def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
|
def _dict_element(
|
||||||
|
d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
|
||||||
|
) -> etree.Element:
|
||||||
el = etree.Element("dict")
|
el = etree.Element("dict")
|
||||||
items = d.items()
|
items = d.items()
|
||||||
if ctx.sort_keys:
|
if ctx.sort_keys:
|
||||||
@ -371,7 +373,9 @@ def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etre
|
|||||||
return el
|
return el
|
||||||
|
|
||||||
|
|
||||||
def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
|
def _array_element(
|
||||||
|
array: Sequence[PlistEncodable], ctx: SimpleNamespace
|
||||||
|
) -> etree.Element:
|
||||||
el = etree.Element("array")
|
el = etree.Element("array")
|
||||||
if len(array) == 0:
|
if len(array) == 0:
|
||||||
return el
|
return el
|
||||||
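These private helpers back the module's public dumps()/loads() round-trip; a short, hedged sketch of the intended behaviour:

from fontTools.misc import plistlib

data = {"familyName": "Test", "guidelines": [{"x": 100}]}
blob = plistlib.dumps(data)        # dicts become <dict>, lists become <array>
assert plistlib.loads(blob) == data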
|
@ -3,7 +3,10 @@ CFF dictionary data and Type1/Type2 CharStrings.
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from fontTools.misc.fixedTools import (
|
from fontTools.misc.fixedTools import (
|
||||||
fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat,
|
fixedToFloat,
|
||||||
|
floatToFixed,
|
||||||
|
floatToFixedToStr,
|
||||||
|
strToFixedToFloat,
|
||||||
)
|
)
|
||||||
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin
|
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin
|
||||||
from fontTools.pens.boundsPen import BoundsPen
|
from fontTools.pens.boundsPen import BoundsPen
|
||||||
@ -27,44 +30,52 @@ def read_operator(self, b0, data, index):
|
|||||||
value = self.handle_operator(operator)
|
value = self.handle_operator(operator)
|
||||||
return value, index
|
return value, index
|
||||||
|
|
||||||
|
|
||||||
def read_byte(self, b0, data, index):
|
def read_byte(self, b0, data, index):
|
||||||
return b0 - 139, index
|
return b0 - 139, index
|
||||||
|
|
||||||
|
|
||||||
def read_smallInt1(self, b0, data, index):
|
def read_smallInt1(self, b0, data, index):
|
||||||
b1 = byteord(data[index])
|
b1 = byteord(data[index])
|
||||||
return (b0 - 247) * 256 + b1 + 108, index + 1
|
return (b0 - 247) * 256 + b1 + 108, index + 1
|
||||||
|
|
||||||
|
|
||||||
def read_smallInt2(self, b0, data, index):
|
def read_smallInt2(self, b0, data, index):
|
||||||
b1 = byteord(data[index])
|
b1 = byteord(data[index])
|
||||||
return -(b0 - 251) * 256 - b1 - 108, index + 1
|
return -(b0 - 251) * 256 - b1 - 108, index + 1
|
||||||
|
|
||||||
|
|
||||||
def read_shortInt(self, b0, data, index):
|
def read_shortInt(self, b0, data, index):
|
||||||
value, = struct.unpack(">h", data[index:index+2])
|
(value,) = struct.unpack(">h", data[index : index + 2])
|
||||||
return value, index + 2
|
return value, index + 2
|
||||||
|
|
||||||
|
|
||||||
def read_longInt(self, b0, data, index):
|
def read_longInt(self, b0, data, index):
|
||||||
value, = struct.unpack(">l", data[index:index+4])
|
(value,) = struct.unpack(">l", data[index : index + 4])
|
||||||
return value, index + 4
|
return value, index + 4
|
||||||
|
|
||||||
|
|
||||||
def read_fixed1616(self, b0, data, index):
|
def read_fixed1616(self, b0, data, index):
|
||||||
value, = struct.unpack(">l", data[index:index+4])
|
(value,) = struct.unpack(">l", data[index : index + 4])
|
||||||
return fixedToFloat(value, precisionBits=16), index + 4
|
return fixedToFloat(value, precisionBits=16), index + 4
|
||||||
|
|
||||||
|
|
||||||
def read_reserved(self, b0, data, index):
|
def read_reserved(self, b0, data, index):
|
||||||
assert NotImplementedError
|
assert NotImplementedError
|
||||||
return NotImplemented, index
|
return NotImplemented, index
|
||||||
|
|
||||||
|
|
||||||
def read_realNumber(self, b0, data, index):
|
def read_realNumber(self, b0, data, index):
|
||||||
number = ''
|
number = ""
|
||||||
while True:
|
while True:
|
||||||
b = byteord(data[index])
|
b = byteord(data[index])
|
||||||
index = index + 1
|
index = index + 1
|
||||||
nibble0 = (b & 0xf0) >> 4
|
nibble0 = (b & 0xF0) >> 4
|
||||||
nibble1 = b & 0x0f
|
nibble1 = b & 0x0F
|
||||||
if nibble0 == 0xf:
|
if nibble0 == 0xF:
|
||||||
break
|
break
|
||||||
number = number + realNibbles[nibble0]
|
number = number + realNibbles[nibble0]
|
||||||
if nibble1 == 0xf:
|
if nibble1 == 0xF:
|
||||||
break
|
break
|
||||||
number = number + realNibbles[nibble1]
|
number = number + realNibbles[nibble1]
|
||||||
return float(number), index
|
return float(number), index
|
||||||
@ -88,8 +99,23 @@ cffDictOperandEncoding[30] = read_realNumber
|
|||||||
cffDictOperandEncoding[255] = read_reserved
|
cffDictOperandEncoding[255] = read_reserved
|
||||||
|
|
||||||
|
|
||||||
realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
|
realNibbles = [
|
||||||
'.', 'E', 'E-', None, '-']
|
"0",
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
"4",
|
||||||
|
"5",
|
||||||
|
"6",
|
||||||
|
"7",
|
||||||
|
"8",
|
||||||
|
"9",
|
||||||
|
".",
|
||||||
|
"E",
|
||||||
|
"E-",
|
||||||
|
None,
|
||||||
|
"-",
|
||||||
|
]
|
||||||
realNibblesDict = {v: i for i, v in enumerate(realNibbles)}
|
realNibblesDict = {v: i for i, v in enumerate(realNibbles)}
|
||||||
|
|
||||||
maxOpStack = 193
|
maxOpStack = 193
|
||||||
@ -112,62 +138,63 @@ def buildOperatorDict(operatorList):
|
|||||||
|
|
||||||
t2Operators = [
|
t2Operators = [
|
||||||
# opcode name
|
# opcode name
|
||||||
(1, 'hstem'),
|
(1, "hstem"),
|
||||||
(3, 'vstem'),
|
(3, "vstem"),
|
||||||
(4, 'vmoveto'),
|
(4, "vmoveto"),
|
||||||
(5, 'rlineto'),
|
(5, "rlineto"),
|
||||||
(6, 'hlineto'),
|
(6, "hlineto"),
|
||||||
(7, 'vlineto'),
|
(7, "vlineto"),
|
||||||
(8, 'rrcurveto'),
|
(8, "rrcurveto"),
|
||||||
(10, 'callsubr'),
|
(10, "callsubr"),
|
||||||
(11, 'return'),
|
(11, "return"),
|
||||||
(14, 'endchar'),
|
(14, "endchar"),
|
||||||
(15, 'vsindex'),
|
(15, "vsindex"),
|
||||||
(16, 'blend'),
|
(16, "blend"),
|
||||||
(18, 'hstemhm'),
|
(18, "hstemhm"),
|
||||||
(19, 'hintmask'),
|
(19, "hintmask"),
|
||||||
(20, 'cntrmask'),
|
(20, "cntrmask"),
|
||||||
(21, 'rmoveto'),
|
(21, "rmoveto"),
|
||||||
(22, 'hmoveto'),
|
(22, "hmoveto"),
|
||||||
(23, 'vstemhm'),
|
(23, "vstemhm"),
|
||||||
(24, 'rcurveline'),
|
(24, "rcurveline"),
|
||||||
(25, 'rlinecurve'),
|
(25, "rlinecurve"),
|
||||||
(26, 'vvcurveto'),
|
(26, "vvcurveto"),
|
||||||
(27, 'hhcurveto'),
|
(27, "hhcurveto"),
|
||||||
# (28, 'shortint'), # not really an operator
|
# (28, 'shortint'), # not really an operator
|
||||||
(29, 'callgsubr'),
|
(29, "callgsubr"),
|
||||||
(30, 'vhcurveto'),
|
(30, "vhcurveto"),
|
||||||
(31, 'hvcurveto'),
|
(31, "hvcurveto"),
|
||||||
((12, 0), 'ignore'), # dotsection. Yes, there are a few very early OTF/CFF
|
((12, 0), "ignore"), # dotsection. Yes, there are a few very early OTF/CFF
|
||||||
# fonts with this deprecated operator. Just ignore it.
|
# fonts with this deprecated operator. Just ignore it.
|
||||||
((12, 3), 'and'),
|
((12, 3), "and"),
|
||||||
((12, 4), 'or'),
|
((12, 4), "or"),
|
||||||
((12, 5), 'not'),
|
((12, 5), "not"),
|
||||||
((12, 8), 'store'),
|
((12, 8), "store"),
|
||||||
((12, 9), 'abs'),
|
((12, 9), "abs"),
|
||||||
((12, 10), 'add'),
|
((12, 10), "add"),
|
||||||
((12, 11), 'sub'),
|
((12, 11), "sub"),
|
||||||
((12, 12), 'div'),
|
((12, 12), "div"),
|
||||||
((12, 13), 'load'),
|
((12, 13), "load"),
|
||||||
((12, 14), 'neg'),
|
((12, 14), "neg"),
|
||||||
((12, 15), 'eq'),
|
((12, 15), "eq"),
|
||||||
((12, 18), 'drop'),
|
((12, 18), "drop"),
|
||||||
((12, 20), 'put'),
|
((12, 20), "put"),
|
||||||
((12, 21), 'get'),
|
((12, 21), "get"),
|
||||||
((12, 22), 'ifelse'),
|
((12, 22), "ifelse"),
|
||||||
((12, 23), 'random'),
|
((12, 23), "random"),
|
||||||
((12, 24), 'mul'),
|
((12, 24), "mul"),
|
||||||
((12, 26), 'sqrt'),
|
((12, 26), "sqrt"),
|
||||||
((12, 27), 'dup'),
|
((12, 27), "dup"),
|
||||||
((12, 28), 'exch'),
|
((12, 28), "exch"),
|
||||||
((12, 29), 'index'),
|
((12, 29), "index"),
|
||||||
((12, 30), 'roll'),
|
((12, 30), "roll"),
|
||||||
((12, 34), 'hflex'),
|
((12, 34), "hflex"),
|
||||||
((12, 35), 'flex'),
|
((12, 35), "flex"),
|
||||||
((12, 36), 'hflex1'),
|
((12, 36), "hflex1"),
|
||||||
((12, 37), 'flex1'),
|
((12, 37), "flex1"),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def getIntEncoder(format):
|
def getIntEncoder(format):
|
||||||
if format == "cff":
|
if format == "cff":
|
||||||
fourByteOp = bytechr(29)
|
fourByteOp = bytechr(29)
|
||||||
@ -177,8 +204,13 @@ def getIntEncoder(format):
|
|||||||
assert format == "t2"
|
assert format == "t2"
|
||||||
fourByteOp = None
|
fourByteOp = None
|
||||||
|
|
||||||
def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr,
|
def encodeInt(
|
||||||
pack=struct.pack, unpack=struct.unpack):
|
value,
|
||||||
|
fourByteOp=fourByteOp,
|
||||||
|
bytechr=bytechr,
|
||||||
|
pack=struct.pack,
|
||||||
|
unpack=struct.unpack,
|
||||||
|
):
|
||||||
if -107 <= value <= 107:
|
if -107 <= value <= 107:
|
||||||
code = bytechr(value + 139)
|
code = bytechr(value + 139)
|
||||||
elif 108 <= value <= 1131:
|
elif 108 <= value <= 1131:
|
||||||
@ -200,9 +232,11 @@ def getIntEncoder(format):
|
|||||||
# distinguish anymore between small ints that were supposed to
|
# distinguish anymore between small ints that were supposed to
|
||||||
# be small fixed numbers and small ints that were just small
|
# be small fixed numbers and small ints that were just small
|
||||||
# ints. Hence the warning.
|
# ints. Hence the warning.
|
||||||
log.warning("4-byte T2 number got passed to the "
|
log.warning(
|
||||||
|
"4-byte T2 number got passed to the "
|
||||||
"IntType handler. This should happen only when reading in "
|
"IntType handler. This should happen only when reading in "
|
||||||
"old XML files.\n")
|
"old XML files.\n"
|
||||||
|
)
|
||||||
code = bytechr(255) + pack(">l", value)
|
code = bytechr(255) + pack(">l", value)
|
||||||
else:
|
else:
|
||||||
code = fourByteOp + pack(">l", value)
|
code = fourByteOp + pack(">l", value)
|
||||||
@ -215,6 +249,7 @@ encodeIntCFF = getIntEncoder("cff")
|
|||||||
encodeIntT1 = getIntEncoder("t1")
|
encodeIntT1 = getIntEncoder("t1")
|
||||||
encodeIntT2 = getIntEncoder("t2")
|
encodeIntT2 = getIntEncoder("t2")
|
||||||
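For orientation, the single- and double-byte branches of encodeInt above are the inverse of the read_byte/read_smallInt handlers at the top of the file. A rough sketch of just those short forms (values outside these ranges fall through to the longer 3- and 5-byte encodings handled above):

def encode_small_int(value):
    # Short CFF/T2 integer forms; bigger values need an operator-prefixed form.
    if -107 <= value <= 107:
        return bytes([value + 139])
    if 108 <= value <= 1131:
        value -= 108
        return bytes([(value >> 8) + 247, value & 0xFF])
    if -1131 <= value <= -108:
        value = -value - 108
        return bytes([(value >> 8) + 251, value & 0xFF])
    raise ValueError("needs a longer encoding")

assert encode_small_int(0) == b"\x8b"         # 139
assert encode_small_int(108) == b"\xf7\x00"   # 247, 0
assert encode_small_int(-108) == b"\xfb\x00"  # 251, 0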
|
|
||||||
|
|
||||||
def encodeFixed(f, pack=struct.pack):
|
def encodeFixed(f, pack=struct.pack):
|
||||||
"""For T2 only"""
|
"""For T2 only"""
|
||||||
value = floatToFixed(f, precisionBits=16)
|
value = floatToFixed(f, precisionBits=16)
|
||||||
@ -224,7 +259,8 @@ def encodeFixed(f, pack=struct.pack):
|
|||||||
return b"\xff" + pack(">l", value) # encode the entire fixed point value
|
return b"\xff" + pack(">l", value) # encode the entire fixed point value
|
||||||
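The fall-through case above writes the 255 marker followed by the raw 16.16 fixed value. As an illustrative check (small integral values take the shorter integer path elided by this hunk), floatToFixed(0.5, precisionBits=16) is 32768, so 0.5 would serialize as ff 00 00 80 00:

import struct
from fontTools.misc.fixedTools import floatToFixed

value = floatToFixed(0.5, precisionBits=16)  # 32768 == 0x00008000
assert b"\xff" + struct.pack(">l", value) == b"\xff\x00\x00\x80\x00"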
|
|
||||||
|
|
||||||
realZeroBytes = bytechr(30) + bytechr(0xf)
|
realZeroBytes = bytechr(30) + bytechr(0xF)
|
||||||
|
|
||||||
|
|
||||||
def encodeFloat(f):
|
def encodeFloat(f):
|
||||||
# For CFF only, used in cffLib
|
# For CFF only, used in cffLib
|
||||||
@ -249,20 +285,20 @@ def encodeFloat(f):
|
|||||||
elif c2 == "+":
|
elif c2 == "+":
|
||||||
s = s[1:]
|
s = s[1:]
|
||||||
nibbles.append(realNibblesDict[c])
|
nibbles.append(realNibblesDict[c])
|
||||||
nibbles.append(0xf)
|
nibbles.append(0xF)
|
||||||
if len(nibbles) % 2:
|
if len(nibbles) % 2:
|
||||||
nibbles.append(0xf)
|
nibbles.append(0xF)
|
||||||
d = bytechr(30)
|
d = bytechr(30)
|
||||||
for i in range(0, len(nibbles), 2):
|
for i in range(0, len(nibbles), 2):
|
||||||
d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1])
|
d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1])
|
||||||
return d
|
return d
|
||||||
|
|
||||||
|
|
||||||
class CharStringCompileError(Exception): pass
|
class CharStringCompileError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class SimpleT2Decompiler(object):
|
class SimpleT2Decompiler(object):
|
||||||
|
|
||||||
def __init__(self, localSubrs, globalSubrs, private=None, blender=None):
|
def __init__(self, localSubrs, globalSubrs, private=None, blender=None):
|
||||||
self.localSubrs = localSubrs
|
self.localSubrs = localSubrs
|
||||||
self.localBias = calcSubrBias(localSubrs)
|
self.localBias = calcSubrBias(localSubrs)
|
||||||
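calcSubrBias itself is outside this diff; for reference, the Type 2 spec derives the subroutine-number bias purely from how many subroutines the INDEX contains, roughly as sketched here:

def subr_bias(subrs):
    # Subr numbers are stored biased so that small collections fit in one byte.
    count = len(subrs)
    if count < 1240:
        return 107
    if count < 33900:
        return 1131
    return 32768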
@ -346,10 +382,13 @@ class SimpleT2Decompiler(object):
|
|||||||
|
|
||||||
def op_hstem(self, index):
|
def op_hstem(self, index):
|
||||||
self.countHints()
|
self.countHints()
|
||||||
|
|
||||||
def op_vstem(self, index):
|
def op_vstem(self, index):
|
||||||
self.countHints()
|
self.countHints()
|
||||||
|
|
||||||
def op_hstemhm(self, index):
|
def op_hstemhm(self, index):
|
||||||
self.countHints()
|
self.countHints()
|
||||||
|
|
||||||
def op_vstemhm(self, index):
|
def op_vstemhm(self, index):
|
||||||
self.countHints()
|
self.countHints()
|
||||||
|
|
||||||
@ -369,46 +408,67 @@ class SimpleT2Decompiler(object):
|
|||||||
# misc
|
# misc
|
||||||
def op_and(self, index):
|
def op_and(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_or(self, index):
|
def op_or(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_not(self, index):
|
def op_not(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_store(self, index):
|
def op_store(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_abs(self, index):
|
def op_abs(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_add(self, index):
|
def op_add(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_sub(self, index):
|
def op_sub(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_div(self, index):
|
def op_div(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_load(self, index):
|
def op_load(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_neg(self, index):
|
def op_neg(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_eq(self, index):
|
def op_eq(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_drop(self, index):
|
def op_drop(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_put(self, index):
|
def op_put(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_get(self, index):
|
def op_get(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_ifelse(self, index):
|
def op_ifelse(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_random(self, index):
|
def op_random(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_mul(self, index):
|
def op_mul(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_sqrt(self, index):
|
def op_sqrt(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_dup(self, index):
|
def op_dup(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_exch(self, index):
|
def op_exch(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_index(self, index):
|
def op_index(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_roll(self, index):
|
def op_roll(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
@ -418,7 +478,9 @@ class SimpleT2Decompiler(object):
|
|||||||
numBlends = self.pop()
|
numBlends = self.pop()
|
||||||
numOps = numBlends * (self.numRegions + 1)
|
numOps = numBlends * (self.numRegions + 1)
|
||||||
if self.blender is None:
|
if self.blender is None:
|
||||||
del self.operandStack[-(numOps-numBlends):] # Leave the default operands on the stack.
|
del self.operandStack[
|
||||||
|
-(numOps - numBlends) :
|
||||||
|
] # Leave the default operands on the stack.
|
||||||
else:
|
else:
|
||||||
argi = len(self.operandStack) - numOps
|
argi = len(self.operandStack) - numOps
|
||||||
end_args = tuplei = argi + numBlends
|
end_args = tuplei = argi + numBlends
|
||||||
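To make the default-only branch above concrete, here is an illustrative stack for two variation regions and three blended operands (the exact ordering of the deltas does not matter for this sketch, since all of them are discarded):

numRegions, numBlends = 2, 3
operandStack = ["d0", "d1", "d2",              # default-font values
                "a", "b", "c", "d", "e", "f"]  # numBlends * numRegions deltas
numOps = numBlends * (numRegions + 1)          # 9
del operandStack[-(numOps - numBlends):]       # drop the 6 deltas
assert operandStack == ["d0", "d1", "d2"]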
@ -439,37 +501,44 @@ class SimpleT2Decompiler(object):
|
|||||||
|
|
||||||
t1Operators = [
|
t1Operators = [
|
||||||
# opcode name
|
# opcode name
|
||||||
(1, 'hstem'),
|
(1, "hstem"),
|
||||||
(3, 'vstem'),
|
(3, "vstem"),
|
||||||
(4, 'vmoveto'),
|
(4, "vmoveto"),
|
||||||
(5, 'rlineto'),
|
(5, "rlineto"),
|
||||||
(6, 'hlineto'),
|
(6, "hlineto"),
|
||||||
(7, 'vlineto'),
|
(7, "vlineto"),
|
||||||
(8, 'rrcurveto'),
|
(8, "rrcurveto"),
|
||||||
(9, 'closepath'),
|
(9, "closepath"),
|
||||||
(10, 'callsubr'),
|
(10, "callsubr"),
|
||||||
(11, 'return'),
|
(11, "return"),
|
||||||
(13, 'hsbw'),
|
(13, "hsbw"),
|
||||||
(14, 'endchar'),
|
(14, "endchar"),
|
||||||
(21, 'rmoveto'),
|
(21, "rmoveto"),
|
||||||
(22, 'hmoveto'),
|
(22, "hmoveto"),
|
||||||
(30, 'vhcurveto'),
|
(30, "vhcurveto"),
|
||||||
(31, 'hvcurveto'),
|
(31, "hvcurveto"),
|
||||||
((12, 0), 'dotsection'),
|
((12, 0), "dotsection"),
|
||||||
((12, 1), 'vstem3'),
|
((12, 1), "vstem3"),
|
||||||
((12, 2), 'hstem3'),
|
((12, 2), "hstem3"),
|
||||||
((12, 6), 'seac'),
|
((12, 6), "seac"),
|
||||||
((12, 7), 'sbw'),
|
((12, 7), "sbw"),
|
||||||
((12, 12), 'div'),
|
((12, 12), "div"),
|
||||||
((12, 16), 'callothersubr'),
|
((12, 16), "callothersubr"),
|
||||||
((12, 17), 'pop'),
|
((12, 17), "pop"),
|
||||||
((12, 33), 'setcurrentpoint'),
|
((12, 33), "setcurrentpoint"),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class T2WidthExtractor(SimpleT2Decompiler):
|
class T2WidthExtractor(SimpleT2Decompiler):
|
||||||
|
def __init__(
|
||||||
def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None):
|
self,
|
||||||
|
localSubrs,
|
||||||
|
globalSubrs,
|
||||||
|
nominalWidthX,
|
||||||
|
defaultWidthX,
|
||||||
|
private=None,
|
||||||
|
blender=None,
|
||||||
|
):
|
||||||
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender)
|
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender)
|
||||||
self.nominalWidthX = nominalWidthX
|
self.nominalWidthX = nominalWidthX
|
||||||
self.defaultWidthX = defaultWidthX
|
self.defaultWidthX = defaultWidthX
|
||||||
@ -484,7 +553,9 @@ class T2WidthExtractor(SimpleT2Decompiler):
|
|||||||
if not self.gotWidth:
|
if not self.gotWidth:
|
||||||
if evenOdd ^ (len(args) % 2):
|
if evenOdd ^ (len(args) % 2):
|
||||||
# For CFF2 charstrings, this should never happen
|
# For CFF2 charstrings, this should never happen
|
||||||
assert self.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
|
assert (
|
||||||
|
self.defaultWidthX is not None
|
||||||
|
), "CFF2 CharStrings must not have an initial width value"
|
||||||
self.width = self.nominalWidthX + args[0]
|
self.width = self.nominalWidthX + args[0]
|
||||||
args = args[1:]
|
args = args[1:]
|
||||||
else:
|
else:
|
||||||
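The parity test above is what detects an optional leading advance-width operand: evenOdd is 1 for operators that normally take an odd number of arguments (hmoveto, vmoveto) and 0 for the rest, so an unexpected extra argument is read as the width. A tiny sketch of the same check:

def has_initial_width(num_args, even_odd):
    # Mirrors the check above: parity mismatch means a width is present.
    return bool(even_odd ^ (num_args % 2))

assert has_initial_width(3, 0)      # rmoveto given width dx dy
assert not has_initial_width(2, 0)  # rmoveto given just dx dy
assert has_initial_width(2, 1)      # hmoveto given width dx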
@ -510,10 +581,25 @@ class T2WidthExtractor(SimpleT2Decompiler):
|
|||||||
|
|
||||||
|
|
||||||
class T2OutlineExtractor(T2WidthExtractor):
|
class T2OutlineExtractor(T2WidthExtractor):
|
||||||
|
def __init__(
|
||||||
def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None):
|
self,
|
||||||
|
pen,
|
||||||
|
localSubrs,
|
||||||
|
globalSubrs,
|
||||||
|
nominalWidthX,
|
||||||
|
defaultWidthX,
|
||||||
|
private=None,
|
||||||
|
blender=None,
|
||||||
|
):
|
||||||
T2WidthExtractor.__init__(
|
T2WidthExtractor.__init__(
|
||||||
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender)
|
self,
|
||||||
|
localSubrs,
|
||||||
|
globalSubrs,
|
||||||
|
nominalWidthX,
|
||||||
|
defaultWidthX,
|
||||||
|
private,
|
||||||
|
blender,
|
||||||
|
)
|
||||||
self.pen = pen
|
self.pen = pen
|
||||||
self.subrLevel = 0
|
self.subrLevel = 0
|
||||||
|
|
||||||
@ -586,17 +672,21 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
def op_rmoveto(self, index):
|
def op_rmoveto(self, index):
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo(self.popallWidth())
|
self.rMoveTo(self.popallWidth())
|
||||||
|
|
||||||
def op_hmoveto(self, index):
|
def op_hmoveto(self, index):
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo((self.popallWidth(1)[0], 0))
|
self.rMoveTo((self.popallWidth(1)[0], 0))
|
||||||
|
|
||||||
def op_vmoveto(self, index):
|
def op_vmoveto(self, index):
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo((0, self.popallWidth(1)[0]))
|
self.rMoveTo((0, self.popallWidth(1)[0]))
|
||||||
|
|
||||||
def op_endchar(self, index):
|
def op_endchar(self, index):
|
||||||
self.endPath()
|
self.endPath()
|
||||||
args = self.popallWidth()
|
args = self.popallWidth()
|
||||||
if args:
|
if args:
|
||||||
from fontTools.encodings.StandardEncoding import StandardEncoding
|
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||||
|
|
||||||
# endchar can do seac accent building; the T2 spec says it's deprecated,
|
# endchar can do seac accent building; the T2 spec says it's deprecated,
|
||||||
# but recent software that shall remain nameless does output it.
|
# but recent software that shall remain nameless does output it.
|
||||||
adx, ady, bchar, achar = args
|
adx, ady, bchar, achar = args
|
||||||
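For context, bchar and achar in the seac-style endchar above are Adobe StandardEncoding codes naming the base and accent glyphs. A quick illustrative check, assuming fontTools is importable:

from fontTools.encodings.StandardEncoding import StandardEncoding

assert StandardEncoding[65] == "A"       # a typical base-glyph code
assert StandardEncoding[0] == ".notdef"  # unassigned codes map to .notdef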
@ -616,6 +706,7 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
|
|
||||||
def op_hlineto(self, index):
|
def op_hlineto(self, index):
|
||||||
self.alternatingLineto(1)
|
self.alternatingLineto(1)
|
||||||
|
|
||||||
def op_vlineto(self, index):
|
def op_vlineto(self, index):
|
||||||
self.alternatingLineto(0)
|
self.alternatingLineto(0)
|
||||||
|
|
||||||
@ -626,7 +717,14 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
"""{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
|
"""{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
|
||||||
args = self.popall()
|
args = self.popall()
|
||||||
for i in range(0, len(args), 6):
|
for i in range(0, len(args), 6):
|
||||||
dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
|
(
|
||||||
|
dxa,
|
||||||
|
dya,
|
||||||
|
dxb,
|
||||||
|
dyb,
|
||||||
|
dxc,
|
||||||
|
dyc,
|
||||||
|
) = args[i : i + 6]
|
||||||
self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
|
self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
|
||||||
|
|
||||||
def op_rcurveline(self, index):
|
def op_rcurveline(self, index):
|
||||||
@ -701,10 +799,12 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
dy5 = -dy2
|
dy5 = -dy2
|
||||||
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
||||||
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
||||||
|
|
||||||
def op_flex(self, index):
|
def op_flex(self, index):
|
||||||
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
|
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
|
||||||
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
||||||
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
||||||
|
|
||||||
def op_hflex1(self, index):
|
def op_hflex1(self, index):
|
||||||
dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
|
dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
|
||||||
dy3 = dy4 = 0
|
dy3 = dy4 = 0
|
||||||
@ -712,6 +812,7 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
|
|
||||||
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
|
||||||
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
|
||||||
|
|
||||||
def op_flex1(self, index):
|
def op_flex1(self, index):
|
||||||
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
|
dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
|
||||||
dx = dx1 + dx2 + dx3 + dx4 + dx5
|
dx = dx1 + dx2 + dx3 + dx4 + dx5
|
||||||
@ -728,18 +829,25 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
# misc
|
# misc
|
||||||
def op_and(self, index):
|
def op_and(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_or(self, index):
|
def op_or(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_not(self, index):
|
def op_not(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_store(self, index):
|
def op_store(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_abs(self, index):
|
def op_abs(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_add(self, index):
|
def op_add(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_sub(self, index):
|
def op_sub(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_div(self, index):
|
def op_div(self, index):
|
||||||
num2 = self.pop()
|
num2 = self.pop()
|
||||||
num1 = self.pop()
|
num1 = self.pop()
|
||||||
@ -749,32 +857,46 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
self.push(d1)
|
self.push(d1)
|
||||||
else:
|
else:
|
||||||
self.push(d2)
|
self.push(d2)
|
||||||
|
|
||||||
def op_load(self, index):
|
def op_load(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_neg(self, index):
|
def op_neg(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_eq(self, index):
|
def op_eq(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_drop(self, index):
|
def op_drop(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_put(self, index):
|
def op_put(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_get(self, index):
|
def op_get(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_ifelse(self, index):
|
def op_ifelse(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_random(self, index):
|
def op_random(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_mul(self, index):
|
def op_mul(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_sqrt(self, index):
|
def op_sqrt(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_dup(self, index):
|
def op_dup(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_exch(self, index):
|
def op_exch(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_index(self, index):
|
def op_index(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def op_roll(self, index):
|
def op_roll(self, index):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
@ -813,8 +935,8 @@ class T2OutlineExtractor(T2WidthExtractor):
|
|||||||
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
|
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
|
||||||
return args
|
return args
|
||||||
|
|
||||||
class T1OutlineExtractor(T2OutlineExtractor):
|
|
||||||
|
|
||||||
|
class T1OutlineExtractor(T2OutlineExtractor):
|
||||||
def __init__(self, pen, subrs):
|
def __init__(self, pen, subrs):
|
||||||
self.pen = pen
|
self.pen = pen
|
||||||
self.subrs = subrs
|
self.subrs = subrs
|
||||||
@ -846,6 +968,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
return
|
return
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo(self.popall())
|
self.rMoveTo(self.popall())
|
||||||
|
|
||||||
def op_hmoveto(self, index):
|
def op_hmoveto(self, index):
|
||||||
if self.flexing:
|
if self.flexing:
|
||||||
# We must add a parameter to the stack if we are flexing
|
# We must add a parameter to the stack if we are flexing
|
||||||
@ -853,6 +976,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
return
|
return
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo((self.popall()[0], 0))
|
self.rMoveTo((self.popall()[0], 0))
|
||||||
|
|
||||||
def op_vmoveto(self, index):
|
def op_vmoveto(self, index):
|
||||||
if self.flexing:
|
if self.flexing:
|
||||||
# We must add a parameter to the stack if we are flexing
|
# We must add a parameter to the stack if we are flexing
|
||||||
@ -861,8 +985,10 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
return
|
return
|
||||||
self.endPath()
|
self.endPath()
|
||||||
self.rMoveTo((0, self.popall()[0]))
|
self.rMoveTo((0, self.popall()[0]))
|
||||||
|
|
||||||
def op_closepath(self, index):
|
def op_closepath(self, index):
|
||||||
self.closePath()
|
self.closePath()
|
||||||
|
|
||||||
def op_setcurrentpoint(self, index):
|
def op_setcurrentpoint(self, index):
|
||||||
args = self.popall()
|
args = self.popall()
|
||||||
x, y = args
|
x, y = args
|
||||||
@ -876,6 +1002,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
self.width = wx
|
self.width = wx
|
||||||
self.sbx = sbx
|
self.sbx = sbx
|
||||||
self.currentPoint = sbx, self.currentPoint[1]
|
self.currentPoint = sbx, self.currentPoint[1]
|
||||||
|
|
||||||
def op_sbw(self, index):
|
def op_sbw(self, index):
|
||||||
self.popall() # XXX
|
self.popall() # XXX
|
||||||
|
|
||||||
@ -884,6 +1011,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
subrIndex = self.pop()
|
subrIndex = self.pop()
|
||||||
subr = self.subrs[subrIndex]
|
subr = self.subrs[subrIndex]
|
||||||
self.execute(subr)
|
self.execute(subr)
|
||||||
|
|
||||||
def op_callothersubr(self, index):
|
def op_callothersubr(self, index):
|
||||||
subrIndex = self.pop()
|
subrIndex = self.pop()
|
||||||
nArgs = self.pop()
|
nArgs = self.pop()
|
||||||
@ -894,6 +1022,7 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
elif subrIndex == 1 and nArgs == 0:
|
elif subrIndex == 1 and nArgs == 0:
|
||||||
self.flexing = 1
|
self.flexing = 1
|
||||||
# ignore...
|
# ignore...
|
||||||
|
|
||||||
def op_pop(self, index):
|
def op_pop(self, index):
|
||||||
pass # ignore...
|
pass # ignore...
|
||||||
|
|
||||||
@ -941,20 +1070,25 @@ class T1OutlineExtractor(T2OutlineExtractor):
|
|||||||
|
|
||||||
def op_dotsection(self, index):
|
def op_dotsection(self, index):
|
||||||
self.popall() # XXX
|
self.popall() # XXX
|
||||||
|
|
||||||
def op_hstem3(self, index):
|
def op_hstem3(self, index):
|
||||||
self.popall() # XXX
|
self.popall() # XXX
|
||||||
|
|
||||||
def op_seac(self, index):
|
def op_seac(self, index):
|
||||||
"asb adx ady bchar achar seac"
|
"asb adx ady bchar achar seac"
|
||||||
from fontTools.encodings.StandardEncoding import StandardEncoding
|
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||||
|
|
||||||
asb, adx, ady, bchar, achar = self.popall()
|
asb, adx, ady, bchar, achar = self.popall()
|
||||||
baseGlyph = StandardEncoding[bchar]
|
baseGlyph = StandardEncoding[bchar]
|
||||||
self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
|
self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
|
||||||
accentGlyph = StandardEncoding[achar]
|
accentGlyph = StandardEncoding[achar]
|
||||||
adx = adx + self.sbx - asb # seac weirdness
|
adx = adx + self.sbx - asb # seac weirdness
|
||||||
self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
|
self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
|
||||||
|
|
||||||
def op_vstem3(self, index):
|
def op_vstem3(self, index):
|
||||||
self.popall() # XXX
|
self.popall() # XXX
|
||||||
|
|
||||||
|
|
||||||
class T2CharString(object):
|
class T2CharString(object):
|
||||||
|
|
||||||
operandEncoding = t2OperandEncoding
|
operandEncoding = t2OperandEncoding
|
||||||
@ -973,11 +1107,11 @@ class T2CharString(object):
|
|||||||
|
|
||||||
def getNumRegions(self, vsindex=None):
|
def getNumRegions(self, vsindex=None):
|
||||||
pd = self.private
|
pd = self.private
|
||||||
assert(pd is not None)
|
assert pd is not None
|
||||||
if vsindex is not None:
|
if vsindex is not None:
|
||||||
self._cur_vsindex = vsindex
|
self._cur_vsindex = vsindex
|
||||||
elif self._cur_vsindex is None:
|
elif self._cur_vsindex is None:
|
||||||
self._cur_vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0
|
self._cur_vsindex = pd.vsindex if hasattr(pd, "vsindex") else 0
|
||||||
return pd.getNumRegions(self._cur_vsindex)
|
return pd.getNumRegions(self._cur_vsindex)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
@ -1001,9 +1135,15 @@ class T2CharString(object):
|
|||||||
|
|
||||||
def draw(self, pen, blender=None):
|
def draw(self, pen, blender=None):
|
||||||
subrs = getattr(self.private, "Subrs", [])
|
subrs = getattr(self.private, "Subrs", [])
|
||||||
extractor = self.outlineExtractor(pen, subrs, self.globalSubrs,
|
extractor = self.outlineExtractor(
|
||||||
self.private.nominalWidthX, self.private.defaultWidthX,
|
pen,
|
||||||
self.private, blender)
|
subrs,
|
||||||
|
self.globalSubrs,
|
||||||
|
self.private.nominalWidthX,
|
||||||
|
self.private.defaultWidthX,
|
||||||
|
self.private,
|
||||||
|
blender,
|
||||||
|
)
|
||||||
extractor.execute(self)
|
extractor.execute(self)
|
||||||
self.width = extractor.width
|
self.width = extractor.width
|
||||||
|
|
||||||
@ -1040,7 +1180,7 @@ class T2CharString(object):
|
|||||||
bytecode.extend(bytechr(b) for b in opcodes[token])
|
bytecode.extend(bytechr(b) for b in opcodes[token])
|
||||||
except KeyError:
|
except KeyError:
|
||||||
raise CharStringCompileError("illegal operator: %s" % token)
|
raise CharStringCompileError("illegal operator: %s" % token)
|
||||||
if token in ('hintmask', 'cntrmask'):
|
if token in ("hintmask", "cntrmask"):
|
||||||
bytecode.append(program[i]) # hint mask
|
bytecode.append(program[i]) # hint mask
|
||||||
i = i + 1
|
i = i + 1
|
||||||
elif isinstance(token, int):
|
elif isinstance(token, int):
|
||||||
@ -1067,8 +1207,7 @@ class T2CharString(object):
|
|||||||
self.bytecode = bytecode
|
self.bytecode = bytecode
|
||||||
self.program = None
|
self.program = None
|
||||||
|
|
||||||
def getToken(self, index,
|
def getToken(self, index, len=len, byteord=byteord, isinstance=isinstance):
|
||||||
len=len, byteord=byteord, isinstance=isinstance):
|
|
||||||
if self.bytecode is not None:
|
if self.bytecode is not None:
|
||||||
if index >= len(self.bytecode):
|
if index >= len(self.bytecode):
|
||||||
return None, 0, 0
|
return None, 0, 0
|
||||||
@ -1100,6 +1239,7 @@ class T2CharString(object):
|
|||||||
|
|
||||||
def toXML(self, xmlWriter, ttFont=None):
|
def toXML(self, xmlWriter, ttFont=None):
|
||||||
from fontTools.misc.textTools import num2binary
|
from fontTools.misc.textTools import num2binary
|
||||||
|
|
||||||
if self.bytecode is not None:
|
if self.bytecode is not None:
|
||||||
xmlWriter.dumphex(self.bytecode)
|
xmlWriter.dumphex(self.bytecode)
|
||||||
else:
|
else:
|
||||||
@ -1110,15 +1250,15 @@ class T2CharString(object):
|
|||||||
if token is None:
|
if token is None:
|
||||||
break
|
break
|
||||||
if isOperator:
|
if isOperator:
|
||||||
if token in ('hintmask', 'cntrmask'):
|
if token in ("hintmask", "cntrmask"):
|
||||||
hintMask, isOperator, index = self.getToken(index)
|
hintMask, isOperator, index = self.getToken(index)
|
||||||
bits = []
|
bits = []
|
||||||
for byte in hintMask:
|
for byte in hintMask:
|
||||||
bits.append(num2binary(byteord(byte), 8))
|
bits.append(num2binary(byteord(byte), 8))
|
||||||
hintMask = strjoin(bits)
|
hintMask = strjoin(bits)
|
||||||
line = ' '.join(args + [token, hintMask])
|
line = " ".join(args + [token, hintMask])
|
||||||
else:
|
else:
|
||||||
line = ' '.join(args + [token])
|
line = " ".join(args + [token])
|
||||||
xmlWriter.write(line)
|
xmlWriter.write(line)
|
||||||
xmlWriter.newline()
|
xmlWriter.newline()
|
||||||
args = []
|
args = []
|
||||||
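The num2binary call above renders each hintmask byte as an 8-bit pattern, one bit per declared stem hint with the most significant bit first. Illustrative:

from fontTools.misc.textTools import num2binary

assert num2binary(0xA0, 8) == "10100000"  # stems 1 and 3 are active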
@ -1132,11 +1272,12 @@ class T2CharString(object):
|
|||||||
# NOTE: only CFF2 charstrings/subrs can have numeric arguments on
|
# NOTE: only CFF2 charstrings/subrs can have numeric arguments on
|
||||||
# the stack after the last operator. Compiling this would fail if
|
# the stack after the last operator. Compiling this would fail if
|
||||||
# this is part of CFF 1.0 table.
|
# this is part of CFF 1.0 table.
|
||||||
line = ' '.join(args)
|
line = " ".join(args)
|
||||||
xmlWriter.write(line)
|
xmlWriter.write(line)
|
||||||
|
|
||||||
def fromXML(self, name, attrs, content):
|
def fromXML(self, name, attrs, content):
|
||||||
from fontTools.misc.textTools import binary2num, readHex
|
from fontTools.misc.textTools import binary2num, readHex
|
||||||
|
|
||||||
if attrs.get("raw"):
|
if attrs.get("raw"):
|
||||||
self.setBytecode(readHex(content))
|
self.setBytecode(readHex(content))
|
||||||
return
|
return
|
||||||
@ -1155,7 +1296,7 @@ class T2CharString(object):
|
|||||||
token = strToFixedToFloat(token, precisionBits=16)
|
token = strToFixedToFloat(token, precisionBits=16)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
program.append(token)
|
program.append(token)
|
||||||
if token in ('hintmask', 'cntrmask'):
|
if token in ("hintmask", "cntrmask"):
|
||||||
mask = content[i]
|
mask = content[i]
|
||||||
maskBytes = b""
|
maskBytes = b""
|
||||||
for j in range(0, len(mask), 8):
|
for j in range(0, len(mask), 8):
|
||||||
@ -1168,6 +1309,7 @@ class T2CharString(object):
|
|||||||
program.append(token)
|
program.append(token)
|
||||||
self.setProgram(program)
|
self.setProgram(program)
|
||||||
|
|
||||||
|
|
||||||
class T1CharString(T2CharString):
|
class T1CharString(T2CharString):
|
||||||
|
|
||||||
operandEncoding = t1OperandEncoding
|
operandEncoding = t1OperandEncoding
|
||||||
@ -1201,6 +1343,7 @@ class T1CharString(T2CharString):
|
|||||||
extractor.execute(self)
|
extractor.execute(self)
|
||||||
self.width = extractor.width
|
self.width = extractor.width
|
||||||
|
|
||||||
|
|
||||||
class DictDecompiler(object):
|
class DictDecompiler(object):
|
||||||
|
|
||||||
operandEncoding = cffDictOperandEncoding
|
operandEncoding = cffDictOperandEncoding
|
||||||
@ -1226,6 +1369,7 @@ class DictDecompiler(object):
|
|||||||
value, index = handler(self, b0, data, index)
|
value, index = handler(self, b0, data, index)
|
||||||
if value is not None:
|
if value is not None:
|
||||||
push(value)
|
push(value)
|
||||||
|
|
||||||
def pop(self):
|
def pop(self):
|
||||||
value = self.stack[-1]
|
value = self.stack[-1]
|
||||||
del self.stack[-1]
|
del self.stack[-1]
|
||||||
@ -1270,8 +1414,10 @@ class DictDecompiler(object):
|
|||||||
|
|
||||||
def arg_SID(self, name):
|
def arg_SID(self, name):
|
||||||
return self.strings[self.pop()]
|
return self.strings[self.pop()]
|
||||||
|
|
||||||
def arg_array(self, name):
|
def arg_array(self, name):
|
||||||
return self.popall()
|
return self.popall()
|
||||||
|
|
||||||
def arg_blendList(self, name):
|
def arg_blendList(self, name):
|
||||||
"""
|
"""
|
||||||
There may be non-blend args at the top of the stack. We first calculate
|
There may be non-blend args at the top of the stack. We first calculate
|
||||||
@ -1284,13 +1430,15 @@ class DictDecompiler(object):
|
|||||||
We re-arrange this to be a list of numMasters entries. Each entry starts with the corresponding default font relative value, and is followed by
|
We re-arrange this to be a list of numMasters entries. Each entry starts with the corresponding default font relative value, and is followed by
|
||||||
the delta values. We then convert the default values, the first item in each entry, to an absolute value.
|
the delta values. We then convert the default values, the first item in each entry, to an absolute value.
|
||||||
"""
|
"""
|
||||||
vsindex = self.dict.get('vsindex', 0)
|
vsindex = self.dict.get("vsindex", 0)
|
||||||
numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops.
|
numMasters = (
|
||||||
|
self.parent.getNumRegions(vsindex) + 1
|
||||||
|
) # only a PrivateDict has blended ops.
|
||||||
numBlends = self.pop()
|
numBlends = self.pop()
|
||||||
args = self.popall()
|
args = self.popall()
|
||||||
numArgs = len(args)
|
numArgs = len(args)
|
||||||
# The spec says that there should be no non-blended Blue Values.
|
# The spec says that there should be no non-blended Blue Values.
|
||||||
assert(numArgs == numMasters * numBlends)
|
assert numArgs == numMasters * numBlends
|
||||||
value = [None] * numBlends
|
value = [None] * numBlends
|
||||||
numDeltas = numMasters - 1
|
numDeltas = numMasters - 1
|
||||||
i = 0
|
i = 0
|
||||||
|
@ -24,7 +24,7 @@ import logging
|
|||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently
|
ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently
|
||||||
|
|
||||||
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
|
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
|
||||||
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
|
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
|
||||||
@ -32,7 +32,7 @@ endofthingRE = re.compile(endofthingPat)
|
|||||||
commentRE = re.compile(b"%[^\n\r]*")
|
commentRE = re.compile(b"%[^\n\r]*")
|
||||||
|
|
||||||
# XXX This is not entirely correct as it doesn't allow *nested* embedded parens:
|
# XXX This is not entirely correct as it doesn't allow *nested* embedded parens:
|
||||||
stringPat = br"""
|
stringPat = rb"""
|
||||||
\(
|
\(
|
||||||
(
|
(
|
||||||
(
|
(
|
||||||
@ -51,13 +51,17 @@ stringRE = re.compile(stringPat)
|
|||||||
|
|
||||||
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
|
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
|
||||||
|
|
||||||
class PSTokenError(Exception): pass
|
|
||||||
class PSError(Exception): pass
|
class PSTokenError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class PSError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class PSTokenizer(object):
|
class PSTokenizer(object):
|
||||||
|
def __init__(self, buf=b"", encoding="ascii"):
|
||||||
def __init__(self, buf=b'', encoding="ascii"):
|
|
||||||
# Force self.buf to be a byte string
|
# Force self.buf to be a byte string
|
||||||
buf = tobytes(buf)
|
buf = tobytes(buf)
|
||||||
self.buf = buf
|
self.buf = buf
|
||||||
@ -86,14 +90,16 @@ class PSTokenizer(object):
|
|||||||
self.closed = True
|
self.closed = True
|
||||||
del self.buf, self.pos
|
del self.buf, self.pos
|
||||||
|
|
||||||
def getnexttoken(self,
|
def getnexttoken(
|
||||||
|
self,
|
||||||
# localize some stuff, for performance
|
# localize some stuff, for performance
|
||||||
len=len,
|
len=len,
|
||||||
ps_special=ps_special,
|
ps_special=ps_special,
|
||||||
stringmatch=stringRE.match,
|
stringmatch=stringRE.match,
|
||||||
hexstringmatch=hexstringRE.match,
|
hexstringmatch=hexstringRE.match,
|
||||||
commentmatch=commentRE.match,
|
commentmatch=commentRE.match,
|
||||||
endmatch=endofthingRE.match):
|
endmatch=endofthingRE.match,
|
||||||
|
):
|
||||||
|
|
||||||
self.skipwhite()
|
self.skipwhite()
|
||||||
if self.pos >= self.len:
|
if self.pos >= self.len:
|
||||||
@ -102,38 +108,38 @@ class PSTokenizer(object):
|
|||||||
buf = self.buf
|
buf = self.buf
|
||||||
char = bytechr(byteord(buf[pos]))
|
char = bytechr(byteord(buf[pos]))
|
||||||
if char in ps_special:
|
if char in ps_special:
|
||||||
if char in b'{}[]':
|
if char in b"{}[]":
|
||||||
tokentype = 'do_special'
|
tokentype = "do_special"
|
||||||
token = char
|
token = char
|
||||||
elif char == b'%':
|
elif char == b"%":
|
||||||
tokentype = 'do_comment'
|
tokentype = "do_comment"
|
||||||
_, nextpos = commentmatch(buf, pos).span()
|
_, nextpos = commentmatch(buf, pos).span()
|
||||||
token = buf[pos:nextpos]
|
token = buf[pos:nextpos]
|
||||||
elif char == b'(':
|
elif char == b"(":
|
||||||
tokentype = 'do_string'
|
tokentype = "do_string"
|
||||||
m = stringmatch(buf, pos)
|
m = stringmatch(buf, pos)
|
||||||
if m is None:
|
if m is None:
|
||||||
raise PSTokenError('bad string at character %d' % pos)
|
raise PSTokenError("bad string at character %d" % pos)
|
||||||
_, nextpos = m.span()
|
_, nextpos = m.span()
|
||||||
token = buf[pos:nextpos]
|
token = buf[pos:nextpos]
|
||||||
elif char == b'<':
|
elif char == b"<":
|
||||||
tokentype = 'do_hexstring'
|
tokentype = "do_hexstring"
|
||||||
m = hexstringmatch(buf, pos)
|
m = hexstringmatch(buf, pos)
|
||||||
if m is None:
|
if m is None:
|
||||||
raise PSTokenError('bad hexstring at character %d' % pos)
|
raise PSTokenError("bad hexstring at character %d" % pos)
|
||||||
_, nextpos = m.span()
|
_, nextpos = m.span()
|
||||||
token = buf[pos:nextpos]
|
token = buf[pos:nextpos]
|
||||||
else:
|
else:
|
||||||
raise PSTokenError('bad token at character %d' % pos)
|
raise PSTokenError("bad token at character %d" % pos)
|
||||||
else:
|
else:
|
||||||
if char == b'/':
|
if char == b"/":
|
||||||
tokentype = 'do_literal'
|
tokentype = "do_literal"
|
||||||
m = endmatch(buf, pos + 1)
|
m = endmatch(buf, pos + 1)
|
||||||
else:
|
else:
|
||||||
tokentype = ''
|
tokentype = ""
|
||||||
m = endmatch(buf, pos)
|
m = endmatch(buf, pos)
|
||||||
if m is None:
|
if m is None:
|
||||||
raise PSTokenError('bad token at character %d' % pos)
|
raise PSTokenError("bad token at character %d" % pos)
|
||||||
_, nextpos = m.span()
|
_, nextpos = m.span()
|
||||||
token = buf[pos:nextpos]
|
token = buf[pos:nextpos]
|
||||||
self.pos = pos + len(token)
|
self.pos = pos + len(token)
|
||||||
@ -152,14 +158,13 @@ class PSTokenizer(object):
|
|||||||
self.pos = 4
|
self.pos = 4
|
||||||
|
|
||||||
def stopeexec(self):
|
def stopeexec(self):
|
||||||
if not hasattr(self, 'dirtybuf'):
|
if not hasattr(self, "dirtybuf"):
|
||||||
return
|
return
|
||||||
self.buf = self.dirtybuf
|
self.buf = self.dirtybuf
|
||||||
del self.dirtybuf
|
del self.dirtybuf
|
||||||
|
|
||||||
|
|
||||||
class PSInterpreter(PSOperators):
|
class PSInterpreter(PSOperators):
|
||||||
|
|
||||||
def __init__(self, encoding="ascii"):
|
def __init__(self, encoding="ascii"):
|
||||||
systemdict = {}
|
systemdict = {}
|
||||||
userdict = {}
|
userdict = {}
|
||||||
@ -172,18 +177,18 @@ class PSInterpreter(PSOperators):
|
|||||||
|
|
||||||
def fillsystemdict(self):
|
def fillsystemdict(self):
|
||||||
systemdict = self.dictstack[0]
|
systemdict = self.dictstack[0]
|
||||||
systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
|
systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
|
||||||
systemdict[']'] = ps_operator(']', self.do_makearray)
|
systemdict["]"] = ps_operator("]", self.do_makearray)
|
||||||
systemdict['true'] = ps_boolean(1)
|
systemdict["true"] = ps_boolean(1)
|
||||||
systemdict['false'] = ps_boolean(0)
|
systemdict["false"] = ps_boolean(0)
|
||||||
systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
|
systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
|
||||||
systemdict['FontDirectory'] = ps_dict({})
|
systemdict["FontDirectory"] = ps_dict({})
|
||||||
self.suckoperators(systemdict, self.__class__)
|
self.suckoperators(systemdict, self.__class__)
|
||||||
|
|
||||||
def suckoperators(self, systemdict, klass):
|
def suckoperators(self, systemdict, klass):
|
||||||
for name in dir(klass):
|
for name in dir(klass):
|
||||||
attr = getattr(self, name)
|
attr = getattr(self, name)
|
||||||
if isinstance(attr, Callable) and name[:3] == 'ps_':
|
if isinstance(attr, Callable) and name[:3] == "ps_":
|
||||||
name = name[3:]
|
name = name[3:]
|
||||||
systemdict[name] = ps_operator(name, attr)
|
systemdict[name] = ps_operator(name, attr)
|
||||||
for baseclass in klass.__bases__:
|
for baseclass in klass.__bases__:
|
||||||
@ -211,24 +216,25 @@ class PSInterpreter(PSOperators):
|
|||||||
except:
|
except:
|
||||||
if self.tokenizer is not None:
|
if self.tokenizer is not None:
|
||||||
log.debug(
|
log.debug(
|
||||||
'ps error:\n'
|
"ps error:\n"
|
||||||
'- - - - - - -\n'
|
"- - - - - - -\n"
|
||||||
'%s\n'
|
"%s\n"
|
||||||
'>>>\n'
|
">>>\n"
|
||||||
'%s\n'
|
"%s\n"
|
||||||
'- - - - - - -',
|
"- - - - - - -",
|
||||||
self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
|
self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
|
||||||
self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
|
self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
|
||||||
|
)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def handle_object(self, object):
|
def handle_object(self, object):
|
||||||
if not (self.proclevel or object.literal or object.type == 'proceduretype'):
|
if not (self.proclevel or object.literal or object.type == "proceduretype"):
|
||||||
if object.type != 'operatortype':
|
if object.type != "operatortype":
|
||||||
object = self.resolve_name(object.value)
|
object = self.resolve_name(object.value)
|
||||||
if object.literal:
|
if object.literal:
|
||||||
self.push(object)
|
self.push(object)
|
||||||
else:
|
else:
|
||||||
if object.type == 'proceduretype':
|
if object.type == "proceduretype":
|
||||||
self.call_procedure(object)
|
self.call_procedure(object)
|
||||||
else:
|
else:
|
||||||
object.function()
|
object.function()
|
||||||
@ -245,22 +251,25 @@ class PSInterpreter(PSOperators):
|
|||||||
for i in range(len(dictstack) - 1, -1, -1):
|
for i in range(len(dictstack) - 1, -1, -1):
|
||||||
if name in dictstack[i]:
|
if name in dictstack[i]:
|
||||||
return dictstack[i][name]
|
return dictstack[i][name]
|
||||||
raise PSError('name error: ' + str(name))
|
raise PSError("name error: " + str(name))
|
||||||
|
|
||||||
def do_token(self, token,
|
def do_token(
|
||||||
|
self,
|
||||||
|
token,
|
||||||
int=int,
|
int=int,
|
||||||
float=float,
|
float=float,
|
||||||
ps_name=ps_name,
|
ps_name=ps_name,
|
||||||
ps_integer=ps_integer,
|
ps_integer=ps_integer,
|
||||||
ps_real=ps_real):
|
ps_real=ps_real,
|
||||||
|
):
|
||||||
try:
|
try:
|
||||||
num = int(token)
|
num = int(token)
|
||||||
except (ValueError, OverflowError):
|
except (ValueError, OverflowError):
|
||||||
try:
|
try:
|
||||||
num = float(token)
|
num = float(token)
|
||||||
except (ValueError, OverflowError):
|
except (ValueError, OverflowError):
|
||||||
if '#' in token:
|
if "#" in token:
|
||||||
hashpos = token.find('#')
|
hashpos = token.find("#")
|
||||||
try:
|
try:
|
||||||
base = int(token[:hashpos])
|
base = int(token[:hashpos])
|
||||||
num = int(token[hashpos + 1 :], base)
|
num = int(token[hashpos + 1 :], base)
|
||||||
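The '#' branch above handles PostScript radix numbers, written base#digits. A hypothetical helper doing the same conversion:

def parse_radix(token):
    base, digits = token.split("#", 1)
    return int(digits, int(base))

assert parse_radix("16#FF") == 255
assert parse_radix("8#777") == 511
assert parse_radix("2#1010") == 10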
@ -287,7 +296,7 @@ class PSInterpreter(PSOperators):
|
|||||||
def do_hexstring(self, token):
|
def do_hexstring(self, token):
|
||||||
hexStr = "".join(token[1:-1].split())
|
hexStr = "".join(token[1:-1].split())
|
||||||
if len(hexStr) % 2:
|
if len(hexStr) % 2:
|
||||||
hexStr = hexStr + '0'
|
hexStr = hexStr + "0"
|
||||||
cleanstr = []
|
cleanstr = []
|
||||||
for i in range(0, len(hexStr), 2):
|
for i in range(0, len(hexStr), 2):
|
||||||
cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
|
cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
|
||||||
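do_hexstring above follows the PostScript rule that a trailing odd nibble is padded with zero, so <901FA> reads the same as <901FA0>. A minimal sketch of that padding and decode:

hexStr = "901FA"
if len(hexStr) % 2:
    hexStr += "0"
decoded = bytes(int(hexStr[i:i + 2], 16) for i in range(0, len(hexStr), 2))
assert decoded == b"\x90\x1f\xa0"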
@ -295,10 +304,10 @@ class PSInterpreter(PSOperators):
|
|||||||
return ps_string(cleanstr)
|
return ps_string(cleanstr)
|
||||||
|
|
||||||
def do_special(self, token):
|
def do_special(self, token):
|
||||||
if token == '{':
|
if token == "{":
|
||||||
self.proclevel = self.proclevel + 1
|
self.proclevel = self.proclevel + 1
|
||||||
return self.procmark
|
return self.procmark
|
||||||
elif token == '}':
|
elif token == "}":
|
||||||
proc = []
|
proc = []
|
||||||
while 1:
|
while 1:
|
||||||
topobject = self.pop()
|
topobject = self.pop()
|
||||||
@ -308,12 +317,12 @@ class PSInterpreter(PSOperators):
|
|||||||
self.proclevel = self.proclevel - 1
|
self.proclevel = self.proclevel - 1
|
||||||
proc.reverse()
|
proc.reverse()
|
||||||
return ps_procedure(proc)
|
return ps_procedure(proc)
|
||||||
elif token == '[':
|
elif token == "[":
|
||||||
return self.mark
|
return self.mark
|
||||||
elif token == ']':
|
elif token == "]":
|
||||||
return ps_name(']')
|
return ps_name("]")
|
||||||
else:
|
else:
|
||||||
raise PSTokenError('huh?')
|
raise PSTokenError("huh?")
|
||||||
|
|
||||||
def push(self, object):
|
def push(self, object):
|
||||||
self.stack.append(object)
|
self.stack.append(object)
|
||||||
@ -321,11 +330,13 @@ class PSInterpreter(PSOperators):
|
|||||||
def pop(self, *types):
|
def pop(self, *types):
|
||||||
stack = self.stack
|
stack = self.stack
|
||||||
if not stack:
|
if not stack:
|
||||||
raise PSError('stack underflow')
|
raise PSError("stack underflow")
|
||||||
object = stack[-1]
|
object = stack[-1]
|
||||||
if types:
|
if types:
|
||||||
if object.type not in types:
|
if object.type not in types:
|
||||||
raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
|
raise PSError(
|
||||||
|
"typecheck, expected %s, found %s" % (repr(types), object.type)
|
||||||
|
)
|
||||||
del stack[-1]
|
del stack[-1]
|
||||||
return object
|
return object
|
||||||
|
|
||||||
@ -355,23 +366,26 @@ def unpack_item(item):
|
|||||||
newitem = [None] * len(item.value)
|
newitem = [None] * len(item.value)
|
||||||
for i in range(len(item.value)):
|
for i in range(len(item.value)):
|
||||||
newitem[i] = unpack_item(item.value[i])
|
newitem[i] = unpack_item(item.value[i])
|
||||||
if item.type == 'proceduretype':
|
if item.type == "proceduretype":
|
||||||
newitem = tuple(newitem)
|
newitem = tuple(newitem)
|
||||||
else:
|
else:
|
||||||
newitem = item.value
|
newitem = item.value
|
||||||
return newitem
|
return newitem
|
||||||
|
|
||||||
|
|
||||||
def suckfont(data, encoding="ascii"):
|
def suckfont(data, encoding="ascii"):
|
||||||
m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
|
m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
|
||||||
if m:
|
if m:
|
||||||
fontName = m.group(1)
|
fontName = m.group(1)
|
||||||
fontName = fontName.decode()
|
fontName = fontName.decode()
|
||||||
else:
|
else:
|
||||||
fontName = None
|
fontName = None
|
||||||
interpreter = PSInterpreter(encoding=encoding)
|
interpreter = PSInterpreter(encoding=encoding)
|
||||||
interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
|
interpreter.interpret(
|
||||||
|
b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
|
||||||
|
)
|
||||||
interpreter.interpret(data)
|
interpreter.interpret(data)
|
||||||
fontdir = interpreter.dictstack[0]['FontDirectory'].value
|
fontdir = interpreter.dictstack[0]["FontDirectory"].value
|
||||||
if fontName in fontdir:
|
if fontName in fontdir:
|
||||||
rawfont = fontdir[fontName]
|
rawfont = fontdir[fontName]
|
||||||
else:
|
else:
|
||||||
|
@ -23,50 +23,60 @@ class ps_operator(ps_object):
|
|||||||
self.name = name
|
self.name = name
|
||||||
self.function = function
|
self.function = function
|
||||||
self.type = self.__class__.__name__[3:] + "type"
|
self.type = self.__class__.__name__[3:] + "type"
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "<operator %s>" % self.name
|
return "<operator %s>" % self.name
|
||||||
|
|
||||||
|
|
||||||
class ps_procedure(ps_object):
|
class ps_procedure(ps_object):
|
||||||
literal = 0
|
literal = 0
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "<procedure>"
|
return "<procedure>"
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
psstring = '{'
|
psstring = "{"
|
||||||
for i in range(len(self.value)):
|
for i in range(len(self.value)):
|
||||||
if i:
|
if i:
|
||||||
psstring = psstring + ' ' + str(self.value[i])
|
psstring = psstring + " " + str(self.value[i])
|
||||||
else:
|
else:
|
||||||
psstring = psstring + str(self.value[i])
|
psstring = psstring + str(self.value[i])
|
||||||
return psstring + '}'
|
return psstring + "}"
|
||||||
|
|
||||||
|
|
||||||
class ps_name(ps_object):
|
class ps_name(ps_object):
|
||||||
literal = 0
|
literal = 0
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
if self.literal:
|
if self.literal:
|
||||||
return '/' + self.value
|
return "/" + self.value
|
||||||
else:
|
else:
|
||||||
return self.value
|
return self.value
|
||||||
|
|
||||||
|
|
||||||
class ps_literal(ps_object):
|
class ps_literal(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return '/' + self.value
|
return "/" + self.value
|
||||||
|
|
||||||
|
|
||||||
class ps_array(ps_object):
|
class ps_array(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
psstring = '['
|
psstring = "["
|
||||||
for i in range(len(self.value)):
|
for i in range(len(self.value)):
|
||||||
item = self.value[i]
|
item = self.value[i]
|
||||||
access = _accessstrings[item.access]
|
access = _accessstrings[item.access]
|
||||||
if access:
|
if access:
|
||||||
access = ' ' + access
|
access = " " + access
|
||||||
if i:
|
if i:
|
||||||
psstring = psstring + ' ' + str(item) + access
|
psstring = psstring + " " + str(item) + access
|
||||||
else:
|
else:
|
||||||
psstring = psstring + str(item) + access
|
psstring = psstring + str(item) + access
|
||||||
return psstring + ']'
|
return psstring + "]"
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "<array>"
|
return "<array>"
|
||||||
|
|
||||||
|
|
||||||
_type1_pre_eexec_order = [
|
_type1_pre_eexec_order = [
|
||||||
"FontInfo",
|
"FontInfo",
|
||||||
"FontName",
|
"FontName",
|
||||||
@ -77,7 +87,7 @@ _type1_pre_eexec_order = [
|
|||||||
"FontBBox",
|
"FontBBox",
|
||||||
"UniqueID",
|
"UniqueID",
|
||||||
"Metrics",
|
"Metrics",
|
||||||
"StrokeWidth"
|
"StrokeWidth",
|
||||||
]
|
]
|
||||||
|
|
||||||
_type1_fontinfo_order = [
|
_type1_fontinfo_order = [
|
||||||
@ -89,40 +99,43 @@ _type1_fontinfo_order = [
|
|||||||
"ItalicAngle",
|
"ItalicAngle",
|
||||||
"isFixedPitch",
|
"isFixedPitch",
|
||||||
"UnderlinePosition",
|
"UnderlinePosition",
|
||||||
"UnderlineThickness"
|
"UnderlineThickness",
|
||||||
]
|
]
|
||||||
|
|
||||||
_type1_post_eexec_order = [
|
_type1_post_eexec_order = ["Private", "CharStrings", "FID"]
|
||||||
"Private",
|
|
||||||
"CharStrings",
|
|
||||||
"FID"
|
|
||||||
]
|
|
||||||
|
|
||||||
def _type1_item_repr(key, value):
|
def _type1_item_repr(key, value):
|
||||||
psstring = ""
|
psstring = ""
|
||||||
access = _accessstrings[value.access]
|
access = _accessstrings[value.access]
|
||||||
if access:
|
if access:
|
||||||
access = access + ' '
|
access = access + " "
|
||||||
if key == 'CharStrings':
|
if key == "CharStrings":
|
||||||
psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
|
psstring = psstring + "/%s %s def\n" % (
|
||||||
elif key == 'Encoding':
|
key,
|
||||||
|
_type1_CharString_repr(value.value),
|
||||||
|
)
|
||||||
|
elif key == "Encoding":
|
||||||
psstring = psstring + _type1_Encoding_repr(value, access)
|
psstring = psstring + _type1_Encoding_repr(value, access)
|
||||||
else:
|
else:
|
||||||
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
|
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
|
||||||
return psstring
|
return psstring
|
||||||
|
|
||||||
|
|
||||||
def _type1_Encoding_repr(encoding, access):
|
def _type1_Encoding_repr(encoding, access):
|
||||||
encoding = encoding.value
|
encoding = encoding.value
|
||||||
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
|
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
|
||||||
for i in range(256):
|
for i in range(256):
|
||||||
name = encoding[i].value
|
name = encoding[i].value
|
||||||
if name != '.notdef':
|
if name != ".notdef":
|
||||||
psstring = psstring + "dup %d /%s put\n" % (i, name)
|
psstring = psstring + "dup %d /%s put\n" % (i, name)
|
||||||
return psstring + access + "def\n"
|
return psstring + access + "def\n"
|
||||||
|
|
||||||
|
|
||||||
def _type1_CharString_repr(charstrings):
|
def _type1_CharString_repr(charstrings):
|
||||||
items = sorted(charstrings.items())
|
items = sorted(charstrings.items())
|
||||||
return 'xxx'
|
return "xxx"
|
||||||
|
|
||||||
|
|
||||||
class ps_font(ps_object):
|
class ps_font(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
@ -146,14 +159,22 @@ class ps_font(ps_object):
|
|||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
psstring = psstring + _type1_item_repr(key, value)
|
psstring = psstring + _type1_item_repr(key, value)
|
||||||
return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
|
return (
|
||||||
8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
|
psstring
|
||||||
|
+ "dup/FontName get exch definefont pop\nmark currentfile closefile\n"
|
||||||
|
+ 8 * (64 * "0" + "\n")
|
||||||
|
+ "cleartomark"
|
||||||
|
+ "\n"
|
||||||
|
)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return '<font>'
|
return "<font>"
|
||||||
|
|
||||||
|
|
||||||
class ps_file(ps_object):
|
class ps_file(ps_object):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class ps_dict(ps_object):
|
class ps_dict(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
psstring = "%d dict dup begin\n" % len(self.value)
|
psstring = "%d dict dup begin\n" % len(self.value)
|
||||||
@ -161,62 +182,69 @@ class ps_dict(ps_object):
|
|||||||
for key, value in items:
|
for key, value in items:
|
||||||
access = _accessstrings[value.access]
|
access = _accessstrings[value.access]
|
||||||
if access:
|
if access:
|
||||||
access = access + ' '
|
access = access + " "
|
||||||
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
|
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
|
||||||
return psstring + 'end '
|
return psstring + "end "
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "<dict>"
|
return "<dict>"
|
||||||
|
|
||||||
|
|
||||||
class ps_mark(ps_object):
|
class ps_mark(ps_object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.value = 'mark'
|
self.value = "mark"
|
||||||
self.type = self.__class__.__name__[3:] + "type"
|
self.type = self.__class__.__name__[3:] + "type"
|
||||||
|
|
||||||
|
|
||||||
class ps_procmark(ps_object):
|
class ps_procmark(ps_object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.value = 'procmark'
|
self.value = "procmark"
|
||||||
self.type = self.__class__.__name__[3:] + "type"
|
self.type = self.__class__.__name__[3:] + "type"
|
||||||
|
|
||||||
|
|
||||||
class ps_null(ps_object):
|
class ps_null(ps_object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.type = self.__class__.__name__[3:] + "type"
|
self.type = self.__class__.__name__[3:] + "type"
|
||||||
|
|
||||||
|
|
||||||
class ps_boolean(ps_object):
|
class ps_boolean(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
if self.value:
|
if self.value:
|
||||||
return 'true'
|
return "true"
|
||||||
else:
|
else:
|
||||||
return 'false'
|
return "false"
|
||||||
|
|
||||||
|
|
||||||
class ps_string(ps_object):
|
class ps_string(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return "(%s)" % repr(self.value)[1:-1]
|
return "(%s)" % repr(self.value)[1:-1]
|
||||||
|
|
||||||
|
|
||||||
class ps_integer(ps_object):
|
class ps_integer(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return repr(self.value)
|
return repr(self.value)
|
||||||
|
|
||||||
|
|
||||||
class ps_real(ps_object):
|
class ps_real(ps_object):
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return repr(self.value)
|
return repr(self.value)
|
||||||
|
|
||||||
|
|
||||||
class PSOperators(object):
|
class PSOperators(object):
|
||||||
|
|
||||||
def ps_def(self):
|
def ps_def(self):
|
||||||
obj = self.pop()
|
obj = self.pop()
|
||||||
name = self.pop()
|
name = self.pop()
|
||||||
self.dictstack[-1][name.value] = obj
|
self.dictstack[-1][name.value] = obj
|
||||||
|
|
||||||
def ps_bind(self):
|
def ps_bind(self):
|
||||||
proc = self.pop('proceduretype')
|
proc = self.pop("proceduretype")
|
||||||
self.proc_bind(proc)
|
self.proc_bind(proc)
|
||||||
self.push(proc)
|
self.push(proc)
|
||||||
|
|
||||||
def proc_bind(self, proc):
|
def proc_bind(self, proc):
|
||||||
for i in range(len(proc.value)):
|
for i in range(len(proc.value)):
|
||||||
item = proc.value[i]
|
item = proc.value[i]
|
||||||
if item.type == 'proceduretype':
|
if item.type == "proceduretype":
|
||||||
self.proc_bind(item)
|
self.proc_bind(item)
|
||||||
else:
|
else:
|
||||||
if not item.literal:
|
if not item.literal:
|
||||||
@ -225,12 +253,12 @@ class PSOperators(object):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
if obj.type == 'operatortype':
|
if obj.type == "operatortype":
|
||||||
proc.value[i] = obj
|
proc.value[i] = obj
|
||||||
|
|
||||||
def ps_exch(self):
|
def ps_exch(self):
|
||||||
if len(self.stack) < 2:
|
if len(self.stack) < 2:
|
||||||
raise RuntimeError('stack underflow')
|
raise RuntimeError("stack underflow")
|
||||||
obj1 = self.pop()
|
obj1 = self.pop()
|
||||||
obj2 = self.pop()
|
obj2 = self.pop()
|
||||||
self.push(obj1)
|
self.push(obj1)
|
||||||
@ -238,12 +266,12 @@ class PSOperators(object):
|
|||||||
|
|
||||||
def ps_dup(self):
|
def ps_dup(self):
|
||||||
if not self.stack:
|
if not self.stack:
|
||||||
raise RuntimeError('stack underflow')
|
raise RuntimeError("stack underflow")
|
||||||
self.push(self.stack[-1])
|
self.push(self.stack[-1])
|
||||||
|
|
||||||
def ps_exec(self):
|
def ps_exec(self):
|
||||||
obj = self.pop()
|
obj = self.pop()
|
||||||
if obj.type == 'proceduretype':
|
if obj.type == "proceduretype":
|
||||||
self.call_procedure(obj)
|
self.call_procedure(obj)
|
||||||
else:
|
else:
|
||||||
self.handle_object(obj)
|
self.handle_object(obj)
|
||||||
@ -267,12 +295,19 @@ class PSOperators(object):
|
|||||||
self.push(obj)
|
self.push(obj)
|
||||||
|
|
||||||
def ps_matrix(self):
|
def ps_matrix(self):
|
||||||
-        matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
+        matrix = [
+            ps_real(1.0),
+            ps_integer(0),
+            ps_integer(0),
+            ps_real(1.0),
+            ps_integer(0),
+            ps_integer(0),
+        ]
self.push(ps_array(matrix))
|
self.push(ps_array(matrix))
|
||||||
|
|
||||||
def ps_string(self):
|
def ps_string(self):
|
||||||
num = self.pop('integertype').value
|
num = self.pop("integertype").value
|
||||||
self.push(ps_string('\0' * num))
|
self.push(ps_string("\0" * num))
|
||||||
|
|
||||||
def ps_type(self):
|
def ps_type(self):
|
||||||
obj = self.pop()
|
obj = self.pop()
|
||||||
@ -306,11 +341,11 @@ class PSOperators(object):
|
|||||||
self.push(ps_file(self.tokenizer))
|
self.push(ps_file(self.tokenizer))
|
||||||
|
|
||||||
def ps_eexec(self):
|
def ps_eexec(self):
|
||||||
f = self.pop('filetype').value
|
f = self.pop("filetype").value
|
||||||
f.starteexec()
|
f.starteexec()
|
||||||
|
|
||||||
def ps_closefile(self):
|
def ps_closefile(self):
|
||||||
f = self.pop('filetype').value
|
f = self.pop("filetype").value
|
||||||
f.skipwhite()
|
f.skipwhite()
|
||||||
f.stopeexec()
|
f.stopeexec()
|
||||||
|
|
||||||
@ -319,12 +354,10 @@ class PSOperators(object):
|
|||||||
while obj != self.mark:
|
while obj != self.mark:
|
||||||
obj = self.pop()
|
obj = self.pop()
|
||||||
|
|
||||||
-    def ps_readstring(self,
-                      ps_boolean=ps_boolean,
-                      len=len):
-        s = self.pop('stringtype')
+    def ps_readstring(self, ps_boolean=ps_boolean, len=len):
+        s = self.pop("stringtype")
oldstr = s.value
|
oldstr = s.value
|
||||||
f = self.pop('filetype')
|
f = self.pop("filetype")
|
||||||
# pad = file.value.read(1)
|
# pad = file.value.read(1)
|
||||||
# for StringIO, this is faster
|
# for StringIO, this is faster
|
||||||
f.value.pos = f.value.pos + 1
|
f.value.pos = f.value.pos + 1
|
||||||
@ -335,18 +368,18 @@ class PSOperators(object):
|
|||||||
|
|
||||||
def ps_known(self):
|
def ps_known(self):
|
||||||
key = self.pop()
|
key = self.pop()
|
||||||
d = self.pop('dicttype', 'fonttype')
|
d = self.pop("dicttype", "fonttype")
|
||||||
self.push(ps_boolean(key.value in d.value))
|
self.push(ps_boolean(key.value in d.value))
|
||||||
|
|
||||||
def ps_if(self):
|
def ps_if(self):
|
||||||
proc = self.pop('proceduretype')
|
proc = self.pop("proceduretype")
|
||||||
if self.pop('booleantype').value:
|
if self.pop("booleantype").value:
|
||||||
self.call_procedure(proc)
|
self.call_procedure(proc)
|
||||||
|
|
||||||
def ps_ifelse(self):
|
def ps_ifelse(self):
|
||||||
proc2 = self.pop('proceduretype')
|
proc2 = self.pop("proceduretype")
|
||||||
proc1 = self.pop('proceduretype')
|
proc1 = self.pop("proceduretype")
|
||||||
if self.pop('booleantype').value:
|
if self.pop("booleantype").value:
|
||||||
self.call_procedure(proc1)
|
self.call_procedure(proc1)
|
||||||
else:
|
else:
|
||||||
self.call_procedure(proc2)
|
self.call_procedure(proc2)
|
||||||
@ -370,19 +403,19 @@ class PSOperators(object):
|
|||||||
self.push(obj)
|
self.push(obj)
|
||||||
|
|
||||||
def ps_not(self):
|
def ps_not(self):
|
||||||
obj = self.pop('booleantype', 'integertype')
|
obj = self.pop("booleantype", "integertype")
|
||||||
if obj.type == 'booleantype':
|
if obj.type == "booleantype":
|
||||||
self.push(ps_boolean(not obj.value))
|
self.push(ps_boolean(not obj.value))
|
||||||
else:
|
else:
|
||||||
self.push(ps_integer(~obj.value))
|
self.push(ps_integer(~obj.value))
|
||||||
|
|
||||||
def ps_print(self):
|
def ps_print(self):
|
||||||
str = self.pop('stringtype')
|
str = self.pop("stringtype")
|
||||||
print('PS output --->', str.value)
|
print("PS output --->", str.value)
|
||||||
|
|
||||||
def ps_anchorsearch(self):
|
def ps_anchorsearch(self):
|
||||||
seek = self.pop('stringtype')
|
seek = self.pop("stringtype")
|
||||||
s = self.pop('stringtype')
|
s = self.pop("stringtype")
|
||||||
seeklen = len(seek.value)
|
seeklen = len(seek.value)
|
||||||
if s.value[:seeklen] == seek.value:
|
if s.value[:seeklen] == seek.value:
|
||||||
self.push(ps_string(s.value[seeklen:]))
|
self.push(ps_string(s.value[seeklen:]))
|
||||||
@ -393,12 +426,12 @@ class PSOperators(object):
|
|||||||
self.push(ps_boolean(0))
|
self.push(ps_boolean(0))
|
||||||
|
|
||||||
def ps_array(self):
|
def ps_array(self):
|
||||||
num = self.pop('integertype')
|
num = self.pop("integertype")
|
||||||
array = ps_array([None] * num.value)
|
array = ps_array([None] * num.value)
|
||||||
self.push(array)
|
self.push(array)
|
||||||
|
|
||||||
def ps_astore(self):
|
def ps_astore(self):
|
||||||
array = self.pop('arraytype')
|
array = self.pop("arraytype")
|
||||||
for i in range(len(array.value) - 1, -1, -1):
|
for i in range(len(array.value) - 1, -1, -1):
|
||||||
array.value[i] = self.pop()
|
array.value[i] = self.pop()
|
||||||
self.push(array)
|
self.push(array)
|
||||||
@ -410,13 +443,13 @@ class PSOperators(object):
|
|||||||
def ps_put(self):
|
def ps_put(self):
|
||||||
obj1 = self.pop()
|
obj1 = self.pop()
|
||||||
obj2 = self.pop()
|
obj2 = self.pop()
|
||||||
obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
|
obj3 = self.pop("arraytype", "dicttype", "stringtype", "proceduretype")
|
||||||
tp = obj3.type
|
tp = obj3.type
|
||||||
if tp == 'arraytype' or tp == 'proceduretype':
|
if tp == "arraytype" or tp == "proceduretype":
|
||||||
obj3.value[obj2.value] = obj1
|
obj3.value[obj2.value] = obj1
|
||||||
elif tp == 'dicttype':
|
elif tp == "dicttype":
|
||||||
obj3.value[obj2.value] = obj1
|
obj3.value[obj2.value] = obj1
|
||||||
elif tp == 'stringtype':
|
elif tp == "stringtype":
|
||||||
index = obj2.value
|
index = obj2.value
|
||||||
obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :]
|
obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :]
|
||||||
|
|
||||||
@ -424,54 +457,56 @@ class PSOperators(object):
|
|||||||
obj1 = self.pop()
|
obj1 = self.pop()
|
||||||
if obj1.value == "Encoding":
|
if obj1.value == "Encoding":
|
||||||
pass
|
pass
|
||||||
-        obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
+        obj2 = self.pop(
+            "arraytype", "dicttype", "stringtype", "proceduretype", "fonttype"
+        )
tp = obj2.type
|
tp = obj2.type
|
||||||
if tp in ('arraytype', 'proceduretype'):
|
if tp in ("arraytype", "proceduretype"):
|
||||||
self.push(obj2.value[obj1.value])
|
self.push(obj2.value[obj1.value])
|
||||||
elif tp in ('dicttype', 'fonttype'):
|
elif tp in ("dicttype", "fonttype"):
|
||||||
self.push(obj2.value[obj1.value])
|
self.push(obj2.value[obj1.value])
|
||||||
elif tp == 'stringtype':
|
elif tp == "stringtype":
|
||||||
self.push(ps_integer(ord(obj2.value[obj1.value])))
|
self.push(ps_integer(ord(obj2.value[obj1.value])))
|
||||||
else:
|
else:
|
||||||
assert False, "shouldn't get here"
|
assert False, "shouldn't get here"
|
||||||
|
|
||||||
def ps_getinterval(self):
|
def ps_getinterval(self):
|
||||||
obj1 = self.pop('integertype')
|
obj1 = self.pop("integertype")
|
||||||
obj2 = self.pop('integertype')
|
obj2 = self.pop("integertype")
|
||||||
obj3 = self.pop('arraytype', 'stringtype')
|
obj3 = self.pop("arraytype", "stringtype")
|
||||||
tp = obj3.type
|
tp = obj3.type
|
||||||
if tp == 'arraytype':
|
if tp == "arraytype":
|
||||||
self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value]))
|
self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value]))
|
||||||
elif tp == 'stringtype':
|
elif tp == "stringtype":
|
||||||
self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value]))
|
self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value]))
|
||||||
|
|
||||||
def ps_putinterval(self):
|
def ps_putinterval(self):
|
||||||
obj1 = self.pop('arraytype', 'stringtype')
|
obj1 = self.pop("arraytype", "stringtype")
|
||||||
obj2 = self.pop('integertype')
|
obj2 = self.pop("integertype")
|
||||||
obj3 = self.pop('arraytype', 'stringtype')
|
obj3 = self.pop("arraytype", "stringtype")
|
||||||
tp = obj3.type
|
tp = obj3.type
|
||||||
if tp == 'arraytype':
|
if tp == "arraytype":
|
||||||
obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value
|
obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value
|
||||||
elif tp == 'stringtype':
|
elif tp == "stringtype":
|
||||||
newstr = obj3.value[: obj2.value]
|
newstr = obj3.value[: obj2.value]
|
||||||
newstr = newstr + obj1.value
|
newstr = newstr + obj1.value
|
||||||
newstr = newstr + obj3.value[obj2.value + len(obj1.value) :]
|
newstr = newstr + obj3.value[obj2.value + len(obj1.value) :]
|
||||||
obj3.value = newstr
|
obj3.value = newstr
|
||||||
|
|
||||||
def ps_cvn(self):
|
def ps_cvn(self):
|
||||||
self.push(ps_name(self.pop('stringtype').value))
|
self.push(ps_name(self.pop("stringtype").value))
|
||||||
|
|
||||||
def ps_index(self):
|
def ps_index(self):
|
||||||
n = self.pop('integertype').value
|
n = self.pop("integertype").value
|
||||||
if n < 0:
|
if n < 0:
|
||||||
raise RuntimeError('index may not be negative')
|
raise RuntimeError("index may not be negative")
|
||||||
self.push(self.stack[-1 - n])
|
self.push(self.stack[-1 - n])
|
||||||
|
|
||||||
def ps_for(self):
|
def ps_for(self):
|
||||||
proc = self.pop('proceduretype')
|
proc = self.pop("proceduretype")
|
||||||
limit = self.pop('integertype', 'realtype').value
|
limit = self.pop("integertype", "realtype").value
|
||||||
increment = self.pop('integertype', 'realtype').value
|
increment = self.pop("integertype", "realtype").value
|
||||||
i = self.pop('integertype', 'realtype').value
|
i = self.pop("integertype", "realtype").value
|
||||||
while 1:
|
while 1:
|
||||||
if increment > 0:
|
if increment > 0:
|
||||||
if i > limit:
|
if i > limit:
|
||||||
@ -487,51 +522,53 @@ class PSOperators(object):
|
|||||||
i = i + increment
|
i = i + increment
|
||||||
|
|
||||||
def ps_forall(self):
|
def ps_forall(self):
|
||||||
proc = self.pop('proceduretype')
|
proc = self.pop("proceduretype")
|
||||||
obj = self.pop('arraytype', 'stringtype', 'dicttype')
|
obj = self.pop("arraytype", "stringtype", "dicttype")
|
||||||
tp = obj.type
|
tp = obj.type
|
||||||
if tp == 'arraytype':
|
if tp == "arraytype":
|
||||||
for item in obj.value:
|
for item in obj.value:
|
||||||
self.push(item)
|
self.push(item)
|
||||||
self.call_procedure(proc)
|
self.call_procedure(proc)
|
||||||
elif tp == 'stringtype':
|
elif tp == "stringtype":
|
||||||
for item in obj.value:
|
for item in obj.value:
|
||||||
self.push(ps_integer(ord(item)))
|
self.push(ps_integer(ord(item)))
|
||||||
self.call_procedure(proc)
|
self.call_procedure(proc)
|
||||||
elif tp == 'dicttype':
|
elif tp == "dicttype":
|
||||||
for key, value in obj.value.items():
|
for key, value in obj.value.items():
|
||||||
self.push(ps_name(key))
|
self.push(ps_name(key))
|
||||||
self.push(value)
|
self.push(value)
|
||||||
self.call_procedure(proc)
|
self.call_procedure(proc)
|
||||||
|
|
||||||
def ps_definefont(self):
|
def ps_definefont(self):
|
||||||
font = self.pop('dicttype')
|
font = self.pop("dicttype")
|
||||||
name = self.pop()
|
name = self.pop()
|
||||||
font = ps_font(font.value)
|
font = ps_font(font.value)
|
||||||
self.dictstack[0]['FontDirectory'].value[name.value] = font
|
self.dictstack[0]["FontDirectory"].value[name.value] = font
|
||||||
self.push(font)
|
self.push(font)
|
||||||
|
|
||||||
def ps_findfont(self):
|
def ps_findfont(self):
|
||||||
name = self.pop()
|
name = self.pop()
|
||||||
font = self.dictstack[0]['FontDirectory'].value[name.value]
|
font = self.dictstack[0]["FontDirectory"].value[name.value]
|
||||||
self.push(font)
|
self.push(font)
|
||||||
|
|
||||||
def ps_pop(self):
|
def ps_pop(self):
|
||||||
self.pop()
|
self.pop()
|
||||||
|
|
||||||
def ps_dict(self):
|
def ps_dict(self):
|
||||||
self.pop('integertype')
|
self.pop("integertype")
|
||||||
self.push(ps_dict({}))
|
self.push(ps_dict({}))
|
||||||
|
|
||||||
def ps_begin(self):
|
def ps_begin(self):
|
||||||
self.dictstack.append(self.pop('dicttype').value)
|
self.dictstack.append(self.pop("dicttype").value)
|
||||||
|
|
||||||
def ps_end(self):
|
def ps_end(self):
|
||||||
if len(self.dictstack) > 2:
|
if len(self.dictstack) > 2:
|
||||||
del self.dictstack[-1]
|
del self.dictstack[-1]
|
||||||
else:
|
else:
|
||||||
raise RuntimeError('dictstack underflow')
|
raise RuntimeError("dictstack underflow")
|
||||||
|
|
||||||
-notdef = '.notdef'
+
+notdef = ".notdef"
from fontTools.encodings.StandardEncoding import StandardEncoding
|
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||||
|
|
||||||
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
|
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
|
||||||
|
@ -15,9 +15,11 @@ __all__ = [
|
|||||||
"roundFunc",
|
"roundFunc",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def noRound(value):
|
def noRound(value):
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
|
||||||
def otRound(value):
|
def otRound(value):
|
||||||
"""Round float value to nearest integer towards ``+Infinity``.
|
"""Round float value to nearest integer towards ``+Infinity``.
|
||||||
|
|
||||||
@ -41,10 +43,12 @@ def otRound(value):
|
|||||||
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
|
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
|
||||||
return int(math.floor(value + 0.5))
|
return int(math.floor(value + 0.5))
|
||||||
|
|
||||||
|
|
||||||
def maybeRound(v, tolerance, round=otRound):
|
def maybeRound(v, tolerance, round=otRound):
|
||||||
rounded = round(v)
|
rounded = round(v)
|
||||||
return rounded if abs(rounded - v) <= tolerance else v
|
return rounded if abs(rounded - v) <= tolerance else v
|
||||||
|
|
||||||
|
|
||||||
def roundFunc(tolerance, round=otRound):
|
def roundFunc(tolerance, round=otRound):
|
||||||
if tolerance < 0:
|
if tolerance < 0:
|
||||||
raise ValueError("Rounding tolerance must be positive")
|
raise ValueError("Rounding tolerance must be positive")
|
||||||
@ -52,7 +56,7 @@ def roundFunc(tolerance, round=otRound):
|
|||||||
if tolerance == 0:
|
if tolerance == 0:
|
||||||
return noRound
|
return noRound
|
||||||
|
|
||||||
if tolerance >= .5:
|
if tolerance >= 0.5:
|
||||||
return round
|
return round
|
||||||
|
|
||||||
return functools.partial(maybeRound, tolerance=tolerance, round=round)
|
return functools.partial(maybeRound, tolerance=tolerance, round=round)
|
||||||
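A standalone sketch of the rounding helpers above (formulas copied from this diff, not imported from the library), showing that halves round toward +Infinity and that maybeRound only snaps values within the tolerance:

import math

def ot_round(value):
    # copy of the otRound() formula above: halves round toward +Infinity
    return int(math.floor(value + 0.5))

def maybe_round(v, tolerance, round=ot_round):
    rounded = round(v)
    return rounded if abs(rounded - v) <= tolerance else v

assert ot_round(0.5) == 1 and ot_round(-1.5) == -1
assert maybe_round(1.002, tolerance=0.01) == 1     # within tolerance: snap
assert maybe_round(1.2, tolerance=0.01) == 1.2     # too far off: keep as-is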
@ -85,7 +89,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
|
|||||||
return "0.0"
|
return "0.0"
|
||||||
|
|
||||||
value = otRound(value / factor) * factor
|
value = otRound(value / factor) * factor
|
||||||
eps = .5 * factor
|
eps = 0.5 * factor
|
||||||
lo = value - eps
|
lo = value - eps
|
||||||
hi = value + eps
|
hi = value + eps
|
||||||
# If the range of valid choices spans an integer, return the integer.
|
# If the range of valid choices spans an integer, return the integer.
|
||||||
@ -99,7 +103,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
|
|||||||
for i in range(len(lo)):
|
for i in range(len(lo)):
|
||||||
if lo[i] != hi[i]:
|
if lo[i] != hi[i]:
|
||||||
break
|
break
|
||||||
period = lo.find('.')
|
period = lo.find(".")
|
||||||
assert period < i
|
assert period < i
|
||||||
fmt = "%%.%df" % (i - period)
|
fmt = "%%.%df" % (i - period)
|
||||||
return fmt % value
|
return fmt % value
|
||||||
|
@ -58,6 +58,7 @@ __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
|
|||||||
class Error(Exception):
|
class Error(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def pack(fmt, obj):
|
def pack(fmt, obj):
|
||||||
formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
|
formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
|
||||||
elements = []
|
elements = []
|
||||||
@ -74,6 +75,7 @@ def pack(fmt, obj):
|
|||||||
data = struct.pack(*(formatstring,) + tuple(elements))
|
data = struct.pack(*(formatstring,) + tuple(elements))
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def unpack(fmt, data, obj=None):
|
def unpack(fmt, data, obj=None):
|
||||||
if obj is None:
|
if obj is None:
|
||||||
obj = {}
|
obj = {}
|
||||||
@ -98,10 +100,12 @@ def unpack(fmt, data, obj=None):
|
|||||||
d[name] = value
|
d[name] = value
|
||||||
return obj
|
return obj
|
||||||
|
|
||||||
|
|
||||||
def unpack2(fmt, data, obj=None):
|
def unpack2(fmt, data, obj=None):
|
||||||
length = calcsize(fmt)
|
length = calcsize(fmt)
|
||||||
return unpack(fmt, data[:length], obj), data[length:]
|
return unpack(fmt, data[:length], obj), data[length:]
|
||||||
|
|
||||||
|
|
||||||
def calcsize(fmt):
|
def calcsize(fmt):
|
||||||
formatstring, names, fixes = getformat(fmt)
|
formatstring, names, fixes = getformat(fmt)
|
||||||
return struct.calcsize(formatstring)
|
return struct.calcsize(formatstring)
|
||||||
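A hedged usage sketch of these pack/unpack helpers, assuming they live in fontTools.misc.sstruct as in current fontTools; the format-string syntax ("name: struct char", with an optional byte-order line) follows the _test() example further down:

from fontTools.misc import sstruct   # assumed module path

fmt = """
    > # big endian
    version:   H
    numGlyphs: H
"""

class Header(object):
    pass

hdr = Header()
hdr.version = 1
hdr.numGlyphs = 2

data = sstruct.pack(fmt, hdr)        # 4 bytes: 00 01 00 02
print(sstruct.unpack(fmt, data))     # -> {'version': 1, 'numGlyphs': 2}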
@ -125,13 +129,11 @@ _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
|
|||||||
# matches an "empty" string, possibly containing whitespace and/or a comment
|
# matches an "empty" string, possibly containing whitespace and/or a comment
|
||||||
_emptyRE = re.compile(r"\s*(#.*)?$")
|
_emptyRE = re.compile(r"\s*(#.*)?$")
|
||||||
|
|
||||||
-_fixedpointmappings = {
-    8: "b",
-    16: "h",
-    32: "l"}
+_fixedpointmappings = {8: "b", 16: "h", 32: "l"}
|
|
||||||
_formatcache = {}
|
_formatcache = {}
|
||||||
|
|
||||||
|
|
||||||
def getformat(fmt, keep_pad_byte=False):
|
def getformat(fmt, keep_pad_byte=False):
|
||||||
fmt = tostr(fmt, encoding="ascii")
|
fmt = tostr(fmt, encoding="ascii")
|
||||||
try:
|
try:
|
||||||
@ -147,7 +149,7 @@ def getformat(fmt, keep_pad_byte=False):
|
|||||||
m = _extraRE.match(line)
|
m = _extraRE.match(line)
|
||||||
if m:
|
if m:
|
||||||
formatchar = m.group(1)
|
formatchar = m.group(1)
|
||||||
if formatchar != 'x' and formatstring:
|
if formatchar != "x" and formatstring:
|
||||||
raise Error("a special fmt char must be first")
|
raise Error("a special fmt char must be first")
|
||||||
else:
|
else:
|
||||||
m = _elementRE.match(line)
|
m = _elementRE.match(line)
|
||||||
@ -171,6 +173,7 @@ def getformat(fmt, keep_pad_byte=False):
|
|||||||
_formatcache[fmt] = formatstring, names, fixes
|
_formatcache[fmt] = formatstring, names, fixes
|
||||||
return formatstring, names, fixes
|
return formatstring, names, fixes
|
||||||
|
|
||||||
|
|
||||||
def _test():
|
def _test():
|
||||||
fmt = """
|
fmt = """
|
||||||
# comments are allowed
|
# comments are allowed
|
||||||
@ -188,16 +191,16 @@ def _test():
|
|||||||
apad: x
|
apad: x
|
||||||
"""
|
"""
|
||||||
|
|
||||||
print('size:', calcsize(fmt))
|
print("size:", calcsize(fmt))
|
||||||
|
|
||||||
class foo(object):
|
class foo(object):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
i = foo()
|
i = foo()
|
||||||
|
|
||||||
i.ashort = 0x7fff
|
i.ashort = 0x7FFF
|
||||||
i.along = 0x7fffffff
|
i.along = 0x7FFFFFFF
|
||||||
i.abyte = 0x7f
|
i.abyte = 0x7F
|
||||||
i.achar = "a"
|
i.achar = "a"
|
||||||
i.astr = "12345"
|
i.astr = "12345"
|
||||||
i.afloat = 0.5
|
i.afloat = 0.5
|
||||||
@ -206,11 +209,12 @@ def _test():
|
|||||||
i.abool = True
|
i.abool = True
|
||||||
|
|
||||||
data = pack(fmt, i)
|
data = pack(fmt, i)
|
||||||
print('data:', repr(data))
|
print("data:", repr(data))
|
||||||
print(unpack(fmt, data))
|
print(unpack(fmt, data))
|
||||||
i2 = foo()
|
i2 = foo()
|
||||||
unpack(fmt, data, i2)
|
unpack(fmt, data, i2)
|
||||||
print(vars(i2))
|
print(vars(i2))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
_test()
|
_test()
|
||||||
|
@ -6,13 +6,13 @@ import sys
|
|||||||
|
|
||||||
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
|
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
|
||||||
|
|
||||||
t, x, y = sp.symbols('t x y', real=True)
|
t, x, y = sp.symbols("t x y", real=True)
|
||||||
c = sp.symbols('c', real=False) # Complex representation instead of x/y
|
c = sp.symbols("c", real=False) # Complex representation instead of x/y
|
||||||
|
|
||||||
X = tuple(sp.symbols('x:%d'%(n+1), real=True))
|
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
|
||||||
Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
|
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
|
||||||
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
|
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
|
||||||
C = tuple(sp.symbols('c:%d'%(n+1), real=False))
|
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
|
||||||
|
|
||||||
# Cubic Bernstein basis functions
|
# Cubic Bernstein basis functions
|
||||||
BinomialCoefficient = [(1, 0)]
|
BinomialCoefficient = [(1, 0)]
|
||||||
@ -25,15 +25,20 @@ del last, this
|
|||||||
|
|
||||||
BernsteinPolynomial = tuple(
|
BernsteinPolynomial = tuple(
|
||||||
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
|
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
|
||||||
-    for n,coeffs in enumerate(BinomialCoefficient))
+    for n, coeffs in enumerate(BinomialCoefficient)
+)

 BezierCurve = tuple(
-    tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
-          for j in range(2))
-    for n,bernsteins in enumerate(BernsteinPolynomial))
+    tuple(
+        sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
+        for j in range(2)
+    )
+    for n, bernsteins in enumerate(BernsteinPolynomial)
+)
 BezierCurveC = tuple(
     sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
-    for n,bernsteins in enumerate(BernsteinPolynomial))
+    for n, bernsteins in enumerate(BernsteinPolynomial)
+)
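As an aside, the Bernstein basis built above is a partition of unity; a small illustrative sympy check (not part of the module):

import sympy as sp

t = sp.symbols("t", real=True)
# degree-3 Bernstein basis, built the same way as BernsteinPolynomial above
basis3 = [sp.binomial(3, i) * t**i * (1 - t) ** (3 - i) for i in range(4)]
assert sp.simplify(sum(basis3) - 1) == 0   # the basis sums to 1 for every t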
|
|
||||||
|
|
||||||
def green(f, curveXY):
|
def green(f, curveXY):
|
||||||
@ -44,17 +49,17 @@ def green(f, curveXY):
|
|||||||
|
|
||||||
|
|
||||||
class _BezierFuncsLazy(dict):
|
class _BezierFuncsLazy(dict):
|
||||||
|
|
||||||
def __init__(self, symfunc):
|
def __init__(self, symfunc):
|
||||||
self._symfunc = symfunc
|
self._symfunc = symfunc
|
||||||
self._bezfuncs = {}
|
self._bezfuncs = {}
|
||||||
|
|
||||||
def __missing__(self, i):
|
def __missing__(self, i):
|
||||||
args = ['p%d'%d for d in range(i+1)]
|
args = ["p%d" % d for d in range(i + 1)]
|
||||||
f = green(self._symfunc, BezierCurve[i])
|
f = green(self._symfunc, BezierCurve[i])
|
||||||
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
|
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
|
||||||
return sp.lambdify(args, f)
|
return sp.lambdify(args, f)
|
||||||
|
|
||||||
|
|
||||||
class GreenPen(BasePen):
|
class GreenPen(BasePen):
|
||||||
|
|
||||||
_BezierFuncs = {}
|
_BezierFuncs = {}
|
||||||
@ -97,6 +102,7 @@ class GreenPen(BasePen):
|
|||||||
p0 = self._getCurrentPoint()
|
p0 = self._getCurrentPoint()
|
||||||
self.value += self._funcs[3](p0, p1, p2, p3)
|
self.value += self._funcs[3](p0, p1, p2, p3)
|
||||||
|
|
||||||
|
|
||||||
# Sample pens.
|
# Sample pens.
|
||||||
# Do not use this in real code.
|
# Do not use this in real code.
|
||||||
# Use fontTools.pens.momentsPen.MomentsPen instead.
|
# Use fontTools.pens.momentsPen.MomentsPen instead.
|
||||||
@ -114,7 +120,7 @@ def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
|
|||||||
print('"""%s"""' % docstring)
|
print('"""%s"""' % docstring)
|
||||||
|
|
||||||
print(
|
print(
|
||||||
'''from fontTools.pens.basePen import BasePen, OpenContourError
|
"""from fontTools.pens.basePen import BasePen, OpenContourError
|
||||||
try:
|
try:
|
||||||
import cython
|
import cython
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@ -135,10 +141,14 @@ class %s(BasePen):
|
|||||||
|
|
||||||
def __init__(self, glyphset=None):
|
def __init__(self, glyphset=None):
|
||||||
BasePen.__init__(self, glyphset)
|
BasePen.__init__(self, glyphset)
|
||||||
-'''% (penName, penName), file=file)
+"""
+        % (penName, penName),
+        file=file,
+    )
for name, f in funcs:
|
for name, f in funcs:
|
||||||
print(' self.%s = 0' % name, file=file)
|
print(" self.%s = 0" % name, file=file)
|
||||||
print('''
|
print(
|
||||||
|
"""
|
||||||
def _moveTo(self, p0):
|
def _moveTo(self, p0):
|
||||||
self.__startPoint = p0
|
self.__startPoint = p0
|
||||||
|
|
||||||
@ -154,32 +164,40 @@ class %s(BasePen):
|
|||||||
raise OpenContourError(
|
raise OpenContourError(
|
||||||
"Green theorem is not defined on open contours."
|
"Green theorem is not defined on open contours."
|
||||||
)
|
)
|
||||||
''', end='', file=file)
|
""",
|
||||||
|
end="",
|
||||||
|
file=file,
|
||||||
|
)
|
||||||
|
|
||||||
for n in (1, 2, 3):
|
for n in (1, 2, 3):
|
||||||
|
|
||||||
|
|
||||||
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
|
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
|
||||||
greens = [green(f, BezierCurve[n]) for name, f in funcs]
|
greens = [green(f, BezierCurve[n]) for name, f in funcs]
|
||||||
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
|
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
|
||||||
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
|
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
|
||||||
-        defs, exprs = sp.cse(greens,
-                             optimizations='basic',
-                             symbols=(sp.Symbol('r%d'%i) for i in count()))
+        defs, exprs = sp.cse(
+            greens,
+            optimizations="basic",
+            symbols=(sp.Symbol("r%d" % i) for i in count()),
+        )
|
|
||||||
print()
|
print()
|
||||||
for name, value in defs:
|
for name, value in defs:
|
||||||
print(' @cython.locals(%s=cython.double)' % name, file=file)
|
print(" @cython.locals(%s=cython.double)" % name, file=file)
|
||||||
if n == 1:
|
if n == 1:
|
||||||
print('''\
|
print(
|
||||||
|
"""\
|
||||||
@cython.locals(x0=cython.double, y0=cython.double)
|
@cython.locals(x0=cython.double, y0=cython.double)
|
||||||
@cython.locals(x1=cython.double, y1=cython.double)
|
@cython.locals(x1=cython.double, y1=cython.double)
|
||||||
def _lineTo(self, p1):
|
def _lineTo(self, p1):
|
||||||
x0,y0 = self._getCurrentPoint()
|
x0,y0 = self._getCurrentPoint()
|
||||||
x1,y1 = p1
|
x1,y1 = p1
|
||||||
''', file=file)
|
""",
|
||||||
|
file=file,
|
||||||
|
)
|
||||||
elif n == 2:
|
elif n == 2:
|
||||||
print('''\
|
print(
|
||||||
|
"""\
|
||||||
@cython.locals(x0=cython.double, y0=cython.double)
|
@cython.locals(x0=cython.double, y0=cython.double)
|
||||||
@cython.locals(x1=cython.double, y1=cython.double)
|
@cython.locals(x1=cython.double, y1=cython.double)
|
||||||
@cython.locals(x2=cython.double, y2=cython.double)
|
@cython.locals(x2=cython.double, y2=cython.double)
|
||||||
@ -187,9 +205,12 @@ class %s(BasePen):
|
|||||||
x0,y0 = self._getCurrentPoint()
|
x0,y0 = self._getCurrentPoint()
|
||||||
x1,y1 = p1
|
x1,y1 = p1
|
||||||
x2,y2 = p2
|
x2,y2 = p2
|
||||||
''', file=file)
|
""",
|
||||||
|
file=file,
|
||||||
|
)
|
||||||
elif n == 3:
|
elif n == 3:
|
||||||
print('''\
|
print(
|
||||||
|
"""\
|
||||||
@cython.locals(x0=cython.double, y0=cython.double)
|
@cython.locals(x0=cython.double, y0=cython.double)
|
||||||
@cython.locals(x1=cython.double, y1=cython.double)
|
@cython.locals(x1=cython.double, y1=cython.double)
|
||||||
@cython.locals(x2=cython.double, y2=cython.double)
|
@cython.locals(x2=cython.double, y2=cython.double)
|
||||||
@ -199,24 +220,30 @@ class %s(BasePen):
|
|||||||
x1,y1 = p1
|
x1,y1 = p1
|
||||||
x2,y2 = p2
|
x2,y2 = p2
|
||||||
x3,y3 = p3
|
x3,y3 = p3
|
||||||
''', file=file)
|
""",
|
||||||
|
file=file,
|
||||||
|
)
|
||||||
for name, value in defs:
|
for name, value in defs:
|
||||||
print(' %s = %s' % (name, value), file=file)
|
print(" %s = %s" % (name, value), file=file)
|
||||||
|
|
||||||
print(file=file)
|
print(file=file)
|
||||||
for name, value in zip([f[0] for f in funcs], exprs):
|
for name, value in zip([f[0] for f in funcs], exprs):
|
||||||
print(' self.%s += %s' % (name, value), file=file)
|
print(" self.%s += %s" % (name, value), file=file)
|
||||||
|
|
||||||
print('''
|
print(
|
||||||
|
"""
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
from fontTools.misc.symfont import x, y, printGreenPen
|
from fontTools.misc.symfont import x, y, printGreenPen
|
||||||
-    printGreenPen('%s', ['''%penName, file=file)
+    printGreenPen('%s', ["""
+        % penName,
+        file=file,
+    )
for name, f in funcs:
|
for name, f in funcs:
|
||||||
print(" ('%s', %s)," % (name, str(f)), file=file)
|
print(" ('%s', %s)," % (name, str(f)), file=file)
|
||||||
print(' ])', file=file)
|
print(" ])", file=file)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
pen = AreaPen()
|
pen = AreaPen()
|
||||||
pen.moveTo((100, 100))
|
pen.moveTo((100, 100))
|
||||||
pen.lineTo((100, 200))
|
pen.lineTo((100, 200))
|
||||||
|
@ -29,12 +29,14 @@ def parseXML(xmlSnippet):
|
|||||||
if isinstance(xmlSnippet, bytes):
|
if isinstance(xmlSnippet, bytes):
|
||||||
xml += xmlSnippet
|
xml += xmlSnippet
|
||||||
elif isinstance(xmlSnippet, str):
|
elif isinstance(xmlSnippet, str):
|
||||||
xml += tobytes(xmlSnippet, 'utf-8')
|
xml += tobytes(xmlSnippet, "utf-8")
|
||||||
elif isinstance(xmlSnippet, Iterable):
|
elif isinstance(xmlSnippet, Iterable):
|
||||||
xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
|
xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
|
||||||
else:
|
else:
|
||||||
raise TypeError("expected string or sequence of strings; found %r"
|
raise TypeError(
|
||||||
% type(xmlSnippet).__name__)
|
"expected string or sequence of strings; found %r"
|
||||||
|
% type(xmlSnippet).__name__
|
||||||
|
)
|
||||||
xml += b"</root>"
|
xml += b"</root>"
|
||||||
reader.parser.Parse(xml, 0)
|
reader.parser.Parse(xml, 0)
|
||||||
return reader.root[2]
|
return reader.root[2]
|
||||||
@ -76,6 +78,7 @@ class FakeFont:
|
|||||||
return self.glyphOrder_[glyphID]
|
return self.glyphOrder_[glyphID]
|
||||||
else:
|
else:
|
||||||
return "glyph%.5d" % glyphID
|
return "glyph%.5d" % glyphID
|
||||||
|
|
||||||
def getGlyphNameMany(self, lst):
|
def getGlyphNameMany(self, lst):
|
||||||
return [self.getGlyphName(gid) for gid in lst]
|
return [self.getGlyphName(gid) for gid in lst]
|
||||||
|
|
||||||
@ -92,6 +95,7 @@ class FakeFont:
|
|||||||
class TestXMLReader_(object):
|
class TestXMLReader_(object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
from xml.parsers.expat import ParserCreate
|
from xml.parsers.expat import ParserCreate
|
||||||
|
|
||||||
self.parser = ParserCreate()
|
self.parser = ParserCreate()
|
||||||
self.parser.StartElementHandler = self.startElement_
|
self.parser.StartElementHandler = self.startElement_
|
||||||
self.parser.EndElementHandler = self.endElement_
|
self.parser.EndElementHandler = self.endElement_
|
||||||
@ -114,7 +118,7 @@ class TestXMLReader_(object):
|
|||||||
self.stack[-1][2].append(data)
|
self.stack[-1][2].append(data)
|
||||||
|
|
||||||
|
|
||||||
def makeXMLWriter(newlinestr='\n'):
|
def makeXMLWriter(newlinestr="\n"):
|
||||||
# don't write OS-specific new lines
|
# don't write OS-specific new lines
|
||||||
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
|
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
|
||||||
# erase XML declaration
|
# erase XML declaration
|
||||||
@ -166,7 +170,7 @@ class MockFont(object):
|
|||||||
to its glyphOrder."""
|
to its glyphOrder."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._glyphOrder = ['.notdef']
|
self._glyphOrder = [".notdef"]
|
||||||
|
|
||||||
class AllocatingDict(dict):
|
class AllocatingDict(dict):
|
||||||
def __missing__(reverseDict, key):
|
def __missing__(reverseDict, key):
|
||||||
@ -174,7 +178,8 @@ class MockFont(object):
|
|||||||
gid = len(reverseDict)
|
gid = len(reverseDict)
|
||||||
reverseDict[key] = gid
|
reverseDict[key] = gid
|
||||||
return gid
|
return gid
|
||||||
-        self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
+
+        self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False
|
self.lazy = False
|
||||||
|
|
||||||
def getGlyphID(self, glyph):
|
def getGlyphID(self, glyph):
|
||||||
@ -192,7 +197,6 @@ class MockFont(object):
|
|||||||
|
|
||||||
|
|
||||||
class TestCase(_TestCase):
|
class TestCase(_TestCase):
|
||||||
|
|
||||||
def __init__(self, methodName):
|
def __init__(self, methodName):
|
||||||
_TestCase.__init__(self, methodName)
|
_TestCase.__init__(self, methodName)
|
||||||
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
|
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
|
||||||
@ -202,7 +206,6 @@ class TestCase(_TestCase):
|
|||||||
|
|
||||||
|
|
||||||
class DataFilesHandler(TestCase):
|
class DataFilesHandler(TestCase):
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.tempdir = None
|
self.tempdir = None
|
||||||
self.num_tempfiles = 0
|
self.num_tempfiles = 0
|
||||||
|
@ -51,7 +51,7 @@ def deHexStr(hexdata):
|
|||||||
def hexStr(data):
|
def hexStr(data):
|
||||||
"""Convert binary data to a hex string."""
|
"""Convert binary data to a hex string."""
|
||||||
h = string.hexdigits
|
h = string.hexdigits
|
||||||
r = ''
|
r = ""
|
||||||
for c in data:
|
for c in data:
|
||||||
i = byteord(c)
|
i = byteord(c)
|
||||||
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
|
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
|
||||||
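An inline replica of the hexStr() loop above for illustration (string.hexdigits begins with "0123456789abcdef", so every byte maps to two lowercase hex digits):

import string

def hex_str(data):
    # same loop as hexStr() above; works for bytes (ints) and str input
    h = string.hexdigits
    r = ""
    for c in data:
        i = c if isinstance(c, int) else ord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r

assert hex_str(b"\x00\xab\xff") == "00abff"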
@ -74,7 +74,7 @@ def num2binary(l, bits=32):
|
|||||||
items.append(binary)
|
items.append(binary)
|
||||||
items.reverse()
|
items.reverse()
|
||||||
assert l in (0, -1), "number doesn't fit in number of bits"
|
assert l in (0, -1), "number doesn't fit in number of bits"
|
||||||
return ' '.join(items)
|
return " ".join(items)
|
||||||
|
|
||||||
|
|
||||||
def binary2num(bin):
|
def binary2num(bin):
|
||||||
@ -151,4 +151,5 @@ def bytesjoin(iterable, joiner=b""):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import doctest, sys
|
import doctest, sys
|
||||||
|
|
||||||
sys.exit(doctest.testmod().failed)
|
sys.exit(doctest.testmod().failed)
|
||||||
|
@ -10,8 +10,21 @@ import calendar
|
|||||||
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
|
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
|
||||||
|
|
||||||
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
||||||
-MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
-              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+MONTHNAMES = [
+    None,
+    "Jan",
+    "Feb",
+    "Mar",
+    "Apr",
+    "May",
+    "Jun",
+    "Jul",
+    "Aug",
+    "Sep",
+    "Oct",
+    "Nov",
+    "Dec",
+]
|
|
||||||
|
|
||||||
def asctime(t=None):
|
def asctime(t=None):
|
||||||
@ -35,22 +48,27 @@ def asctime(t=None):
|
|||||||
if t is None:
|
if t is None:
|
||||||
t = time.localtime()
|
t = time.localtime()
|
||||||
s = "%s %s %2s %s" % (
|
s = "%s %s %2s %s" % (
|
||||||
DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
|
DAYNAMES[t.tm_wday],
|
||||||
time.strftime("%H:%M:%S %Y", t))
|
MONTHNAMES[t.tm_mon],
|
||||||
|
t.tm_mday,
|
||||||
|
time.strftime("%H:%M:%S %Y", t),
|
||||||
|
)
|
||||||
return s
|
return s
|
||||||
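Illustrative call, assuming these helpers live in fontTools.misc.timeTools; the "%s %s %2s %s" template above reproduces the classic asctime() layout:

import time
from fontTools.misc.timeTools import asctime   # assumed location of this module

print(asctime(time.gmtime(0)))   # -> "Thu Jan  1 00:00:00 1970"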
|
|
||||||
|
|
||||||
def timestampToString(value):
|
def timestampToString(value):
|
||||||
return asctime(time.gmtime(max(0, value + epoch_diff)))
|
return asctime(time.gmtime(max(0, value + epoch_diff)))
|
||||||
|
|
||||||
|
|
||||||
def timestampFromString(value):
|
def timestampFromString(value):
|
||||||
wkday, mnth = value[:7].split()
|
wkday, mnth = value[:7].split()
|
||||||
t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
|
t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
|
||||||
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
|
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
|
||||||
wkday_idx = DAYNAMES.index(wkday)
|
wkday_idx = DAYNAMES.index(wkday)
|
||||||
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
|
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
|
||||||
return int(t.timestamp()) - epoch_diff
|
return int(t.timestamp()) - epoch_diff
|
||||||
|
|
||||||
|
|
||||||
def timestampNow():
|
def timestampNow():
|
||||||
# https://reproducible-builds.org/specs/source-date-epoch/
|
# https://reproducible-builds.org/specs/source-date-epoch/
|
||||||
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
|
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
|
||||||
@ -58,6 +76,7 @@ def timestampNow():
|
|||||||
return int(source_date_epoch) - epoch_diff
|
return int(source_date_epoch) - epoch_diff
|
||||||
return int(time.time() - epoch_diff)
|
return int(time.time() - epoch_diff)
|
||||||
|
|
||||||
|
|
||||||
def timestampSinceEpoch(value):
|
def timestampSinceEpoch(value):
|
||||||
return int(value - epoch_diff)
|
return int(value - epoch_diff)
|
||||||
|
|
||||||
@ -65,4 +84,5 @@ def timestampSinceEpoch(value):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
import doctest
|
import doctest
|
||||||
|
|
||||||
sys.exit(doctest.testmod().failed)
|
sys.exit(doctest.testmod().failed)
|
||||||
|
@ -248,6 +248,7 @@ class Transform(NamedTuple):
|
|||||||
>>>
|
>>>
|
||||||
"""
|
"""
|
||||||
import math
|
import math
|
||||||
|
|
||||||
c = _normSinCos(math.cos(angle))
|
c = _normSinCos(math.cos(angle))
|
||||||
s = _normSinCos(math.sin(angle))
|
s = _normSinCos(math.sin(angle))
|
||||||
return self.transform((c, s, -s, c, 0, 0))
|
return self.transform((c, s, -s, c, 0, 0))
|
||||||
@ -263,6 +264,7 @@ class Transform(NamedTuple):
|
|||||||
>>>
|
>>>
|
||||||
"""
|
"""
|
||||||
import math
|
import math
|
||||||
|
|
||||||
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
|
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
|
||||||
|
|
||||||
def transform(self, other):
|
def transform(self, other):
|
||||||
@ -283,7 +285,8 @@ class Transform(NamedTuple):
|
|||||||
yx1 * xx2 + yy1 * yx2,
|
yx1 * xx2 + yy1 * yx2,
|
||||||
yx1 * xy2 + yy1 * yy2,
|
yx1 * xy2 + yy1 * yy2,
|
||||||
xx2 * dx1 + yx2 * dy1 + dx2,
|
xx2 * dx1 + yx2 * dy1 + dx2,
|
||||||
xy2*dx1 + yy2*dy1 + dy2)
|
xy2 * dx1 + yy2 * dy1 + dy2,
|
||||||
|
)
|
||||||
|
|
||||||
def reverseTransform(self, other):
|
def reverseTransform(self, other):
|
||||||
"""Return a new transformation, which is the other transformation
|
"""Return a new transformation, which is the other transformation
|
||||||
@ -306,7 +309,8 @@ class Transform(NamedTuple):
|
|||||||
yx1 * xx2 + yy1 * yx2,
|
yx1 * xx2 + yy1 * yx2,
|
||||||
yx1 * xy2 + yy1 * yy2,
|
yx1 * xy2 + yy1 * yy2,
|
||||||
xx2 * dx1 + yx2 * dy1 + dx2,
|
xx2 * dx1 + yx2 * dy1 + dx2,
|
||||||
xy2*dx1 + yy2*dy1 + dy2)
|
xy2 * dx1 + yy2 * dy1 + dy2,
|
||||||
|
)
|
||||||
|
|
||||||
def inverse(self):
|
def inverse(self):
|
||||||
"""Return the inverse transformation.
|
"""Return the inverse transformation.
|
||||||
@ -368,6 +372,7 @@ class Transform(NamedTuple):
|
|||||||
|
|
||||||
Identity = Transform()
|
Identity = Transform()
|
||||||
|
|
||||||
|
|
||||||
def Offset(x=0, y=0):
|
def Offset(x=0, y=0):
|
||||||
"""Return the identity transformation offset by x, y.
|
"""Return the identity transformation offset by x, y.
|
||||||
|
|
||||||
@ -378,6 +383,7 @@ def Offset(x=0, y=0):
|
|||||||
"""
|
"""
|
||||||
return Transform(1, 0, 0, 1, x, y)
|
return Transform(1, 0, 0, 1, x, y)
|
||||||
|
|
||||||
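A small usage sketch of these helpers (assumed module path fontTools.misc.transform): Offset() is the identity translated by (x, y), and composing a transform with its inverse yields Identity:

from fontTools.misc.transform import Identity, Offset   # assumed module path

t = Offset(10, 20)
assert tuple(t) == (1, 0, 0, 1, 10, 20)      # identity translated by (10, 20)
assert t.transform(t.inverse()) == Identity  # composing with the inverse undoes it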
|
|
||||||
def Scale(x, y=None):
|
def Scale(x, y=None):
|
||||||
"""Return the identity transformation scaled by x, y. The 'y' argument
|
"""Return the identity transformation scaled by x, y. The 'y' argument
|
||||||
may be None, which implies to use the x value for y as well.
|
may be None, which implies to use the x value for y as well.
|
||||||
@ -395,4 +401,5 @@ def Scale(x, y=None):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
import doctest
|
import doctest
|
||||||
|
|
||||||
sys.exit(doctest.testmod().failed)
|
sys.exit(doctest.testmod().failed)
|
||||||
|
@ -8,15 +8,19 @@ import logging
|
|||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
-class TTXParseError(Exception): pass
+class TTXParseError(Exception):
+    pass


 BUFSIZE = 0x4000


 class XMLReader(object):
-
-    def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
-        if fileOrPath == '-':
+    def __init__(
+        self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
+    ):
+        if fileOrPath == "-":
fileOrPath = sys.stdin
|
fileOrPath = sys.stdin
|
||||||
if not hasattr(fileOrPath, "read"):
|
if not hasattr(fileOrPath, "read"):
|
||||||
self.file = open(fileOrPath, "rb")
|
self.file = open(fileOrPath, "rb")
|
||||||
@ -29,6 +33,7 @@ class XMLReader(object):
|
|||||||
self.progress = progress
|
self.progress = progress
|
||||||
if quiet is not None:
|
if quiet is not None:
|
||||||
from fontTools.misc.loggingTools import deprecateArgument
|
from fontTools.misc.loggingTools import deprecateArgument
|
||||||
|
|
||||||
deprecateArgument("quiet", "configure logging instead")
|
deprecateArgument("quiet", "configure logging instead")
|
||||||
self.quiet = quiet
|
self.quiet = quiet
|
||||||
self.root = None
|
self.root = None
|
||||||
@ -55,6 +60,7 @@ class XMLReader(object):
|
|||||||
|
|
||||||
def _parseFile(self, file):
|
def _parseFile(self, file):
|
||||||
from xml.parsers.expat import ParserCreate
|
from xml.parsers.expat import ParserCreate
|
||||||
|
|
||||||
parser = ParserCreate()
|
parser = ParserCreate()
|
||||||
parser.StartElementHandler = self._startElementHandler
|
parser.StartElementHandler = self._startElementHandler
|
||||||
parser.EndElementHandler = self._endElementHandler
|
parser.EndElementHandler = self._endElementHandler
|
||||||
@ -83,7 +89,7 @@ class XMLReader(object):
|
|||||||
self.stackSize = stackSize + 1
|
self.stackSize = stackSize + 1
|
||||||
subFile = attrs.get("src")
|
subFile = attrs.get("src")
|
||||||
if subFile is not None:
|
if subFile is not None:
|
||||||
if hasattr(self.file, 'name'):
|
if hasattr(self.file, "name"):
|
||||||
# if file has a name, get its parent directory
|
# if file has a name, get its parent directory
|
||||||
dirname = os.path.dirname(self.file.name)
|
dirname = os.path.dirname(self.file.name)
|
||||||
else:
|
else:
|
||||||
@ -113,13 +119,13 @@ class XMLReader(object):
|
|||||||
log.info(msg)
|
log.info(msg)
|
||||||
if tag == "GlyphOrder":
|
if tag == "GlyphOrder":
|
||||||
tableClass = ttLib.GlyphOrder
|
tableClass = ttLib.GlyphOrder
|
||||||
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
|
elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
|
||||||
tableClass = DefaultTable
|
tableClass = DefaultTable
|
||||||
else:
|
else:
|
||||||
tableClass = ttLib.getTableClass(tag)
|
tableClass = ttLib.getTableClass(tag)
|
||||||
if tableClass is None:
|
if tableClass is None:
|
||||||
tableClass = DefaultTable
|
tableClass = DefaultTable
|
||||||
if tag == 'loca' and tag in self.ttFont:
|
if tag == "loca" and tag in self.ttFont:
|
||||||
# Special-case the 'loca' table as we need the
|
# Special-case the 'loca' table as we need the
|
||||||
# original if the 'glyf' table isn't recompiled.
|
# original if the 'glyf' table isn't recompiled.
|
||||||
self.currentTable = self.ttFont[tag]
|
self.currentTable = self.ttFont[tag]
|
||||||
@ -157,7 +163,6 @@ class XMLReader(object):
|
|||||||
|
|
||||||
|
|
||||||
class ProgressPrinter(object):
|
class ProgressPrinter(object):
|
||||||
|
|
||||||
def __init__(self, title, maxval=100):
|
def __init__(self, title, maxval=100):
|
||||||
print(title)
|
print(title)
|
||||||
|
|
||||||
|
@ -9,12 +9,17 @@ INDENT = " "
|
|||||||
|
|
||||||
|
|
||||||
class XMLWriter(object):
|
class XMLWriter(object):
|
||||||
-    def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
-                 newlinestr="\n"):
-        if encoding.lower().replace('-','').replace('_','') != 'utf8':
-            raise Exception('Only UTF-8 encoding is supported.')
-        if fileOrPath == '-':
+    def __init__(
+        self,
+        fileOrPath,
+        indentwhite=INDENT,
+        idlefunc=None,
+        encoding="utf_8",
+        newlinestr="\n",
+    ):
+        if encoding.lower().replace("-", "").replace("_", "") != "utf8":
+            raise Exception("Only UTF-8 encoding is supported.")
+        if fileOrPath == "-":
fileOrPath = sys.stdout
|
fileOrPath = sys.stdout
|
||||||
if not hasattr(fileOrPath, "write"):
|
if not hasattr(fileOrPath, "write"):
|
||||||
self.filename = fileOrPath
|
self.filename = fileOrPath
|
||||||
@ -30,11 +35,11 @@ class XMLWriter(object):
|
|||||||
try:
|
try:
|
||||||
# The bytes check should be first. See:
|
# The bytes check should be first. See:
|
||||||
# https://github.com/fonttools/fonttools/pull/233
|
# https://github.com/fonttools/fonttools/pull/233
|
||||||
self.file.write(b'')
|
self.file.write(b"")
|
||||||
self.totype = tobytes
|
self.totype = tobytes
|
||||||
except TypeError:
|
except TypeError:
|
||||||
# This better not fail.
|
# This better not fail.
|
||||||
self.file.write('')
|
self.file.write("")
|
||||||
self.totype = tostr
|
self.totype = tostr
|
||||||
self.indentwhite = self.totype(indentwhite)
|
self.indentwhite = self.totype(indentwhite)
|
||||||
if newlinestr is None:
|
if newlinestr is None:
|
||||||
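A hedged usage sketch of XMLWriter, assuming the usual simpletag()/newline() helpers from fontTools.misc.xmlWriter; only UTF-8 output is accepted, per the check above:

from io import BytesIO
from fontTools.misc.xmlWriter import XMLWriter   # assumed helper methods

writer = XMLWriter(BytesIO(), newlinestr="\n")
writer.simpletag("glyph", name="A", height=700)
writer.newline()
print(writer.file.getvalue().decode("utf-8"))    # XML declaration + <glyph .../>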
@ -84,7 +89,7 @@ class XMLWriter(object):
|
|||||||
self.file.write(self.indentlevel * self.indentwhite)
|
self.file.write(self.indentlevel * self.indentwhite)
|
||||||
self.needindent = 0
|
self.needindent = 0
|
||||||
s = self.totype(data, encoding="utf_8")
|
s = self.totype(data, encoding="utf_8")
|
||||||
if (strip):
|
if strip:
|
||||||
s = s.strip()
|
s = s.strip()
|
||||||
self.file.write(s)
|
self.file.write(s)
|
||||||
|
|
||||||
@ -163,31 +168,36 @@ class XMLWriter(object):
|
|||||||
|
|
||||||
|
|
||||||
def escape(data):
|
def escape(data):
|
||||||
data = tostr(data, 'utf_8')
|
data = tostr(data, "utf_8")
|
||||||
data = data.replace("&", "&")
|
data = data.replace("&", "&")
|
||||||
data = data.replace("<", "<")
|
data = data.replace("<", "<")
|
||||||
data = data.replace(">", ">")
|
data = data.replace(">", ">")
|
||||||
data = data.replace("\r", " ")
|
data = data.replace("\r", " ")
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def escapeattr(data):
|
def escapeattr(data):
|
||||||
data = escape(data)
|
data = escape(data)
|
||||||
data = data.replace('"', """)
|
data = data.replace('"', """)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def escape8bit(data):
|
def escape8bit(data):
|
||||||
"""Input is Unicode string."""
|
"""Input is Unicode string."""
|
||||||
|
|
||||||
def escapechar(c):
|
def escapechar(c):
|
||||||
n = ord(c)
|
n = ord(c)
|
||||||
if 32 <= n <= 127 and c not in "<&>":
|
if 32 <= n <= 127 and c not in "<&>":
|
||||||
return c
|
return c
|
||||||
else:
|
else:
|
||||||
return "&#" + repr(n) + ";"
|
return "&#" + repr(n) + ";"
|
||||||
-    return strjoin(map(escapechar, data.decode('latin-1')))
+
+    return strjoin(map(escapechar, data.decode("latin-1")))
|
|
||||||
|
|
||||||
def hexStr(s):
|
def hexStr(s):
|
||||||
h = string.hexdigits
|
h = string.hexdigits
|
||||||
r = ''
|
r = ""
|
||||||
for c in s:
|
for c in s:
|
||||||
i = byteord(c)
|
i = byteord(c)
|
||||||
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
|
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
|
||||||
|
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
|||||||
import sys
|
import sys
|
||||||
from fontTools.mtiLib import main
|
from fontTools.mtiLib import main
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
@ -2,5 +2,5 @@ import sys
|
|||||||
from fontTools.otlLib.optimize import main
|
from fontTools.otlLib.optimize import main
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
@ -7,7 +7,6 @@ __all__ = ["AreaPen"]
|
|||||||
|
|
||||||
|
|
||||||
class AreaPen(BasePen):
|
class AreaPen(BasePen):
|
||||||
|
|
||||||
def __init__(self, glyphset=None):
|
def __init__(self, glyphset=None):
|
||||||
BasePen.__init__(self, glyphset)
|
BasePen.__init__(self, glyphset)
|
||||||
self.value = 0
|
self.value = 0
|
||||||
@ -18,7 +17,7 @@ class AreaPen(BasePen):
|
|||||||
def _lineTo(self, p1):
|
def _lineTo(self, p1):
|
||||||
x0, y0 = self._p0
|
x0, y0 = self._p0
|
||||||
x1, y1 = p1
|
x1, y1 = p1
|
||||||
self.value -= (x1 - x0) * (y1 + y0) * .5
|
self.value -= (x1 - x0) * (y1 + y0) * 0.5
|
||||||
self._p0 = p1
|
self._p0 = p1
|
||||||
|
|
||||||
def _qCurveToOne(self, p1, p2):
|
def _qCurveToOne(self, p1, p2):
|
||||||
@ -38,11 +37,7 @@ class AreaPen(BasePen):
|
|||||||
x1, y1 = p1[0] - x0, p1[1] - y0
|
x1, y1 = p1[0] - x0, p1[1] - y0
|
||||||
x2, y2 = p2[0] - x0, p2[1] - y0
|
x2, y2 = p2[0] - x0, p2[1] - y0
|
||||||
x3, y3 = p3[0] - x0, p3[1] - y0
|
x3, y3 = p3[0] - x0, p3[1] - y0
|
||||||
-        self.value -= (
-            x1 * ( - y2 - y3) +
-            x2 * (y1 - 2*y3) +
-            x3 * (y1 + 2*y2 )
-        ) * 0.15
+        self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
self._lineTo(p3)
|
self._lineTo(p3)
|
||||||
self._p0 = p3
|
self._p0 = p3
|
||||||
|
|
||||||
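Usage sketch for AreaPen (fontTools.pens.areaPen), extending the __main__ example above: the pen accumulates the signed area of a closed contour in pen.value, so a 100 by 100 square gives 10000, with the sign depending on winding direction:

from fontTools.pens.areaPen import AreaPen

pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.lineTo((0, 100))
pen.closePath()
print(abs(pen.value))   # 10000.0 for a 100x100 square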
|
@ -40,19 +40,25 @@ from typing import Tuple
|
|||||||
|
|
||||||
from fontTools.misc.loggingTools import LogMixin
|
from fontTools.misc.loggingTools import LogMixin
|
||||||
|
|
||||||
__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
|
__all__ = [
|
||||||
"decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
|
"AbstractPen",
|
||||||
|
"NullPen",
|
||||||
|
"BasePen",
|
||||||
|
"PenError",
|
||||||
|
"decomposeSuperBezierSegment",
|
||||||
|
"decomposeQuadraticSegment",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
class PenError(Exception):
|
class PenError(Exception):
|
||||||
"""Represents an error during penning."""
|
"""Represents an error during penning."""
|
||||||
|
|
||||||
|
|
||||||
class OpenContourError(PenError):
|
class OpenContourError(PenError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class AbstractPen:
|
class AbstractPen:
|
||||||
|
|
||||||
def moveTo(self, pt: Tuple[float, float]) -> None:
|
def moveTo(self, pt: Tuple[float, float]) -> None:
|
||||||
"""Begin a new sub path, set the current point to 'pt'. You must
|
"""Begin a new sub path, set the current point to 'pt'. You must
|
||||||
end each sub path with a call to pen.closePath() or pen.endPath().
|
end each sub path with a call to pen.closePath() or pen.endPath().
|
||||||
@ -116,7 +122,7 @@ class AbstractPen:
|
|||||||
def addComponent(
|
def addComponent(
|
||||||
self,
|
self,
|
||||||
glyphName: str,
|
glyphName: str,
|
||||||
transformation: Tuple[float, float, float, float, float, float]
|
transformation: Tuple[float, float, float, float, float, float],
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
|
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
|
||||||
containing an affine transformation, or a Transform object from the
|
containing an affine transformation, or a Transform object from the
|
||||||
@ -128,8 +134,7 @@ class AbstractPen:
|
|||||||
|
|
||||||
class NullPen(AbstractPen):
|
class NullPen(AbstractPen):
|
||||||
|
|
||||||
"""A pen that does nothing.
|
"""A pen that does nothing."""
|
||||||
"""
|
|
||||||
|
|
||||||
def moveTo(self, pt):
|
def moveTo(self, pt):
|
||||||
pass
|
pass
|
||||||
@ -154,8 +159,8 @@ class NullPen(AbstractPen):
|
|||||||
|
|
||||||
|
|
||||||
class LoggingPen(LogMixin, AbstractPen):
|
class LoggingPen(LogMixin, AbstractPen):
|
||||||
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)
|
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""
|
||||||
"""
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
@ -187,16 +192,15 @@ class DecomposingPen(LoggingPen):
|
|||||||
self.glyphSet = glyphSet
|
self.glyphSet = glyphSet
|
||||||
|
|
||||||
def addComponent(self, glyphName, transformation):
|
def addComponent(self, glyphName, transformation):
|
||||||
""" Transform the points of the base glyph and draw it onto self.
|
"""Transform the points of the base glyph and draw it onto self."""
|
||||||
"""
|
|
||||||
from fontTools.pens.transformPen import TransformPen
|
from fontTools.pens.transformPen import TransformPen
|
||||||
|
|
||||||
try:
|
try:
|
||||||
glyph = self.glyphSet[glyphName]
|
glyph = self.glyphSet[glyphName]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
if not self.skipMissingComponents:
|
if not self.skipMissingComponents:
|
||||||
raise MissingComponentError(glyphName)
|
raise MissingComponentError(glyphName)
|
||||||
self.log.warning(
|
self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
|
||||||
"glyph '%s' is missing from glyphSet; skipped" % glyphName)
|
|
||||||
else:
|
else:
|
||||||
tPen = TransformPen(self, transformation)
|
tPen = TransformPen(self, transformation)
|
||||||
glyph.draw(tPen)
|
glyph.draw(tPen)
|
||||||
@ -350,13 +354,14 @@ def decomposeSuperBezierSegment(points):
|
|||||||
factor = j / nDivisions
|
factor = j / nDivisions
|
||||||
temp1 = points[i - 1]
|
temp1 = points[i - 1]
|
||||||
temp2 = points[i - 2]
|
temp2 = points[i - 2]
|
||||||
temp = (temp2[0] + factor * (temp1[0] - temp2[0]),
|
temp = (
|
||||||
temp2[1] + factor * (temp1[1] - temp2[1]))
|
temp2[0] + factor * (temp1[0] - temp2[0]),
|
||||||
|
temp2[1] + factor * (temp1[1] - temp2[1]),
|
||||||
|
)
|
||||||
if pt2 is None:
|
if pt2 is None:
|
||||||
pt2 = temp
|
pt2 = temp
|
||||||
else:
|
else:
|
||||||
pt3 = (0.5 * (pt2[0] + temp[0]),
|
pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1]))
|
||||||
0.5 * (pt2[1] + temp[1]))
|
|
||||||
bezierSegments.append((pt1, pt2, pt3))
|
bezierSegments.append((pt1, pt2, pt3))
|
||||||
pt1, pt2, pt3 = temp, None, None
|
pt1, pt2, pt3 = temp, None, None
|
||||||
bezierSegments.append((pt1, points[-2], points[-1]))
|
bezierSegments.append((pt1, points[-2], points[-1]))
|
||||||
@ -387,13 +392,19 @@ def decomposeQuadraticSegment(points):
|
|||||||
|
|
||||||
class _TestPen(BasePen):
|
class _TestPen(BasePen):
|
||||||
"""Test class that prints PostScript to stdout."""
|
"""Test class that prints PostScript to stdout."""
|
||||||
|
|
||||||
def _moveTo(self, pt):
|
def _moveTo(self, pt):
|
||||||
print("%s %s moveto" % (pt[0], pt[1]))
|
print("%s %s moveto" % (pt[0], pt[1]))
|
||||||
|
|
||||||
def _lineTo(self, pt):
|
def _lineTo(self, pt):
|
||||||
print("%s %s lineto" % (pt[0], pt[1]))
|
print("%s %s lineto" % (pt[0], pt[1]))
|
||||||
|
|
||||||
def _curveToOne(self, bcp1, bcp2, pt):
|
def _curveToOne(self, bcp1, bcp2, pt):
|
||||||
print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1],
|
print(
|
||||||
bcp2[0], bcp2[1], pt[0], pt[1]))
|
"%s %s %s %s %s %s curveto"
|
||||||
|
% (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
|
||||||
|
)
|
||||||
|
|
||||||
def _closePath(self):
|
def _closePath(self):
|
||||||
print("closepath")
|
print("closepath")
|
||||||
|
|
||||||
|
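The basePen hunks above only reflow the SegmentPen protocol and the decomposition helpers. A sketch of how that protocol is consumed, using a hypothetical SegmentCountPen subclass (not part of fontTools), assuming the standard fontTools.pens.basePen module path:

    from fontTools.pens.basePen import BasePen

    class SegmentCountPen(BasePen):
        """Hypothetical pen that counts the segments it receives."""

        def __init__(self, glyphSet=None):
            BasePen.__init__(self, glyphSet)
            self.count = 0

        def _moveTo(self, pt):
            pass

        def _lineTo(self, pt):
            self.count += 1

        def _curveToOne(self, bcp1, bcp2, pt):
            # BasePen has already decomposed super-beziers and converted
            # quadratic segments before this hook is called
            self.count += 1

    pen = SegmentCountPen()
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.qCurveTo((150, 50), (100, 100))  # one implied quadratic -> one _curveToOne call
    pen.closePath()
    print(pen.count)  # 2
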
@ -84,8 +84,9 @@ class BoundsPen(ControlBoundsPen):
|
|||||||
bounds = self.bounds
|
bounds = self.bounds
|
||||||
bounds = updateBounds(bounds, pt)
|
bounds = updateBounds(bounds, pt)
|
||||||
if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
|
if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
|
||||||
bounds = unionRect(bounds, calcCubicBounds(
|
bounds = unionRect(
|
||||||
self._getCurrentPoint(), bcp1, bcp2, pt))
|
bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
|
||||||
|
)
|
||||||
self.bounds = bounds
|
self.bounds = bounds
|
||||||
|
|
||||||
def _qCurveToOne(self, bcp, pt):
|
def _qCurveToOne(self, bcp, pt):
|
||||||
@ -93,6 +94,7 @@ class BoundsPen(ControlBoundsPen):
|
|||||||
bounds = self.bounds
|
bounds = self.bounds
|
||||||
bounds = updateBounds(bounds, pt)
|
bounds = updateBounds(bounds, pt)
|
||||||
if not pointInRect(bcp, bounds):
|
if not pointInRect(bcp, bounds):
|
||||||
bounds = unionRect(bounds, calcQuadraticBounds(
|
bounds = unionRect(
|
||||||
self._getCurrentPoint(), bcp, pt))
|
bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
|
||||||
|
)
|
||||||
self.bounds = bounds
|
self.bounds = bounds
|
||||||
|
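BoundsPen (and its ControlBoundsPen base) are unchanged apart from formatting. A quick sketch of the difference between the two, assuming the standard fontTools.pens.boundsPen module path:

    from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen

    for cls in (ControlBoundsPen, BoundsPen):
        pen = cls(None)
        pen.moveTo((0, 0))
        pen.curveTo((50, 150), (100, 150), (150, 0))
        pen.closePath()
        print(cls.__name__, pen.bounds)
    # ControlBoundsPen (0, 0, 150, 150)   -- control-point box
    # BoundsPen        (0, 0, 150, 112.5) -- true outline extrema via calcCubicBounds
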
@ -5,11 +5,11 @@ __all__ = ["CocoaPen"]
|
|||||||
|
|
||||||
|
|
||||||
class CocoaPen(BasePen):
|
class CocoaPen(BasePen):
|
||||||
|
|
||||||
def __init__(self, glyphSet, path=None):
|
def __init__(self, glyphSet, path=None):
|
||||||
BasePen.__init__(self, glyphSet)
|
BasePen.__init__(self, glyphSet)
|
||||||
if path is None:
|
if path is None:
|
||||||
from AppKit import NSBezierPath
|
from AppKit import NSBezierPath
|
||||||
|
|
||||||
path = NSBezierPath.bezierPath()
|
path = NSBezierPath.bezierPath()
|
||||||
self.path = path
|
self.path = path
|
||||||
|
|
||||||
|
@ -40,8 +40,14 @@ class Cu2QuPen(AbstractPen):
|
|||||||
but are handled separately as anchors.
|
but are handled separately as anchors.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, other_pen, max_err, reverse_direction=False,
|
def __init__(
|
||||||
stats=None, ignore_single_points=False):
|
self,
|
||||||
|
other_pen,
|
||||||
|
max_err,
|
||||||
|
reverse_direction=False,
|
||||||
|
stats=None,
|
||||||
|
ignore_single_points=False,
|
||||||
|
):
|
||||||
if reverse_direction:
|
if reverse_direction:
|
||||||
self.pen = ReverseContourPen(other_pen)
|
self.pen = ReverseContourPen(other_pen)
|
||||||
else:
|
else:
|
||||||
@ -50,9 +56,13 @@ class Cu2QuPen(AbstractPen):
|
|||||||
self.stats = stats
|
self.stats = stats
|
||||||
if ignore_single_points:
|
if ignore_single_points:
|
||||||
import warnings
|
import warnings
|
||||||
warnings.warn("ignore_single_points is deprecated and "
|
|
||||||
|
warnings.warn(
|
||||||
|
"ignore_single_points is deprecated and "
|
||||||
"will be removed in future versions",
|
"will be removed in future versions",
|
||||||
UserWarning, stacklevel=2)
|
UserWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
self.ignore_single_points = ignore_single_points
|
self.ignore_single_points = ignore_single_points
|
||||||
self.start_pt = None
|
self.start_pt = None
|
||||||
self.current_pt = None
|
self.current_pt = None
|
||||||
@ -149,8 +159,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
stats: a dictionary counting the point numbers of quadratic segments.
|
stats: a dictionary counting the point numbers of quadratic segments.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, other_point_pen, max_err, reverse_direction=False,
|
def __init__(self, other_point_pen, max_err, reverse_direction=False, stats=None):
|
||||||
stats=None):
|
|
||||||
BasePointToSegmentPen.__init__(self)
|
BasePointToSegmentPen.__init__(self)
|
||||||
if reverse_direction:
|
if reverse_direction:
|
||||||
self.pen = ReverseContourPointPen(other_point_pen)
|
self.pen = ReverseContourPointPen(other_point_pen)
|
||||||
@ -166,7 +175,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
prev_points = segments[-1][1]
|
prev_points = segments[-1][1]
|
||||||
prev_on_curve = prev_points[-1][0]
|
prev_on_curve = prev_points[-1][0]
|
||||||
for segment_type, points in segments:
|
for segment_type, points in segments:
|
||||||
if segment_type == 'curve':
|
if segment_type == "curve":
|
||||||
for sub_points in self._split_super_bezier_segments(points):
|
for sub_points in self._split_super_bezier_segments(points):
|
||||||
on_curve, smooth, name, kwargs = sub_points[-1]
|
on_curve, smooth, name, kwargs = sub_points[-1]
|
||||||
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
|
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
|
||||||
@ -200,8 +209,9 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
# a "super" bezier; decompose it
|
# a "super" bezier; decompose it
|
||||||
on_curve, smooth, name, kwargs = points[-1]
|
on_curve, smooth, name, kwargs = points[-1]
|
||||||
num_sub_segments = n - 1
|
num_sub_segments = n - 1
|
||||||
for i, sub_points in enumerate(decomposeSuperBezierSegment([
|
for i, sub_points in enumerate(
|
||||||
pt for pt, _, _, _ in points])):
|
decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
|
||||||
|
):
|
||||||
new_segment = []
|
new_segment = []
|
||||||
for point in sub_points[:-1]:
|
for point in sub_points[:-1]:
|
||||||
new_segment.append((point, False, None, {}))
|
new_segment.append((point, False, None, {}))
|
||||||
@ -213,8 +223,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
new_segment.append((sub_points[-1], True, None, {}))
|
new_segment.append((sub_points[-1], True, None, {}))
|
||||||
sub_segments.append(new_segment)
|
sub_segments.append(new_segment)
|
||||||
else:
|
else:
|
||||||
raise AssertionError(
|
raise AssertionError("expected 2 control points, found: %d" % n)
|
||||||
"expected 2 control points, found: %d" % n)
|
|
||||||
return sub_segments
|
return sub_segments
|
||||||
|
|
||||||
def _drawPoints(self, segments):
|
def _drawPoints(self, segments):
|
||||||
@ -223,13 +232,15 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
last_offcurves = []
|
last_offcurves = []
|
||||||
for i, (segment_type, points) in enumerate(segments):
|
for i, (segment_type, points) in enumerate(segments):
|
||||||
if segment_type in ("move", "line"):
|
if segment_type in ("move", "line"):
|
||||||
assert len(points) == 1, (
|
assert len(points) == 1, "illegal line segment point count: %d" % len(
|
||||||
"illegal line segment point count: %d" % len(points))
|
points
|
||||||
|
)
|
||||||
pt, smooth, name, kwargs = points[0]
|
pt, smooth, name, kwargs = points[0]
|
||||||
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
|
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
|
||||||
elif segment_type == "qcurve":
|
elif segment_type == "qcurve":
|
||||||
assert len(points) >= 2, (
|
assert len(points) >= 2, "illegal qcurve segment point count: %d" % len(
|
||||||
"illegal qcurve segment point count: %d" % len(points))
|
points
|
||||||
|
)
|
||||||
offcurves = points[:-1]
|
offcurves = points[:-1]
|
||||||
if offcurves:
|
if offcurves:
|
||||||
if i == 0:
|
if i == 0:
|
||||||
@ -249,8 +260,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
|
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
|
||||||
else:
|
else:
|
||||||
# 'curve' segments must have been converted to 'qcurve' by now
|
# 'curve' segments must have been converted to 'qcurve' by now
|
||||||
raise AssertionError(
|
raise AssertionError("unexpected segment type: %r" % segment_type)
|
||||||
"unexpected segment type: %r" % segment_type)
|
|
||||||
for (pt, smooth, name, kwargs) in last_offcurves:
|
for (pt, smooth, name, kwargs) in last_offcurves:
|
||||||
pen.addPoint(pt, None, smooth, name, **kwargs)
|
pen.addPoint(pt, None, smooth, name, **kwargs)
|
||||||
pen.endPath()
|
pen.endPath()
|
||||||
@ -260,7 +270,6 @@ class Cu2QuPointPen(BasePointToSegmentPen):
|
|||||||
self.pen.addComponent(baseGlyphName, transformation)
|
self.pen.addComponent(baseGlyphName, transformation)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class Cu2QuMultiPen:
|
class Cu2QuMultiPen:
|
||||||
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
|
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
|
||||||
in an interpolation-compatible manner, using the FontTools SegmentPen protocol.
|
in an interpolation-compatible manner, using the FontTools SegmentPen protocol.
|
||||||
@ -281,7 +290,10 @@ class Cu2QuMultiPen:
|
|||||||
|
|
||||||
def __init__(self, other_pens, max_err, reverse_direction=False):
|
def __init__(self, other_pens, max_err, reverse_direction=False):
|
||||||
if reverse_direction:
|
if reverse_direction:
|
||||||
other_pens = [ReverseContourPen(pen, outputImpliedClosingLine=True) for pen in other_pens]
|
other_pens = [
|
||||||
|
ReverseContourPen(pen, outputImpliedClosingLine=True)
|
||||||
|
for pen in other_pens
|
||||||
|
]
|
||||||
self.pens = other_pens
|
self.pens = other_pens
|
||||||
self.max_err = max_err
|
self.max_err = max_err
|
||||||
self.start_pts = None
|
self.start_pts = None
|
||||||
|
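The Cu2QuPen constructor is merely re-wrapped above; its parameters keep the meaning shown in the old signature. A minimal conversion sketch, assuming the standard fontTools.pens.cu2quPen and fontTools.pens.recordingPen module paths:

    from fontTools.pens.cu2quPen import Cu2QuPen
    from fontTools.pens.recordingPen import RecordingPen

    quads = RecordingPen()
    pen = Cu2QuPen(quads, max_err=1.0)               # max_err is in font units
    pen.moveTo((0, 0))
    pen.curveTo((0, 100), (100, 200), (200, 200))    # cubic in
    pen.lineTo((200, 0))
    pen.closePath()
    print(quads.value)   # the cubic comes out as one or more 'qCurveTo' segments within max_err
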
@ -4,7 +4,6 @@ from fontTools.pens.recordingPen import RecordingPen
|
|||||||
|
|
||||||
|
|
||||||
class _PassThruComponentsMixin(object):
|
class _PassThruComponentsMixin(object):
|
||||||
|
|
||||||
def addComponent(self, glyphName, transformation, **kwargs):
|
def addComponent(self, glyphName, transformation, **kwargs):
|
||||||
self._outPen.addComponent(glyphName, transformation, **kwargs)
|
self._outPen.addComponent(glyphName, transformation, **kwargs)
|
||||||
|
|
||||||
|
@ -65,9 +65,7 @@ class HashPointPen(AbstractPointPen):
|
|||||||
pt_type = segmentType[0]
|
pt_type = segmentType[0]
|
||||||
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
|
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
|
||||||
|
|
||||||
def addComponent(
|
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
|
||||||
self, baseGlyphName, transformation, identifier=None, **kwargs
|
|
||||||
):
|
|
||||||
tr = "".join([f"{t:+}" for t in transformation])
|
tr = "".join([f"{t:+}" for t in transformation])
|
||||||
self.data.append("[")
|
self.data.append("[")
|
||||||
try:
|
try:
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
from fontTools.pens.basePen import BasePen, OpenContourError
|
from fontTools.pens.basePen import BasePen, OpenContourError
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import cython
|
import cython
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@ -15,8 +16,8 @@ else:
|
|||||||
|
|
||||||
__all__ = ["MomentsPen"]
|
__all__ = ["MomentsPen"]
|
||||||
|
|
||||||
class MomentsPen(BasePen):
|
|
||||||
|
|
||||||
|
class MomentsPen(BasePen):
|
||||||
def __init__(self, glyphset=None):
|
def __init__(self, glyphset=None):
|
||||||
BasePen.__init__(self, glyphset)
|
BasePen.__init__(self, glyphset)
|
||||||
|
|
||||||
@ -39,9 +40,7 @@ class MomentsPen(BasePen):
|
|||||||
p0 = self._getCurrentPoint()
|
p0 = self._getCurrentPoint()
|
||||||
if p0 != self.__startPoint:
|
if p0 != self.__startPoint:
|
||||||
# Green theorem is not defined on open contours.
|
# Green theorem is not defined on open contours.
|
||||||
raise OpenContourError(
|
raise OpenContourError("Green theorem is not defined on open contours.")
|
||||||
"Green theorem is not defined on open contours."
|
|
||||||
)
|
|
||||||
|
|
||||||
@cython.locals(r0=cython.double)
|
@cython.locals(r0=cython.double)
|
||||||
@cython.locals(r1=cython.double)
|
@cython.locals(r1=cython.double)
|
||||||
@ -78,10 +77,30 @@ class MomentsPen(BasePen):
|
|||||||
|
|
||||||
self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2
|
self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2
|
||||||
self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6
|
self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6
|
||||||
self.momentY += -r0*y1/6 - r8*x1/6 - r9*x1/6 + x0*(r8 + r9 + y0*y1)/6
|
self.momentY += (
|
||||||
self.momentXX += -r10*y0/12 - r10*y1/4 - r2*r5/12 - r4*r6*x1/12 + x0**3*(3*y0 + y1)/12
|
-r0 * y1 / 6 - r8 * x1 / 6 - r9 * x1 / 6 + x0 * (r8 + r9 + y0 * y1) / 6
|
||||||
self.momentXY += -r2*r8/24 - r2*r9/8 - r3*r7/24 + r6*(r7*y1 + 3*r8 + r9)/24 - x0*x1*(r8 - r9)/12
|
)
|
||||||
self.momentYY += -r0*r9/12 - r1*r8/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r8*y1 + r9*y0)/12
|
self.momentXX += (
|
||||||
|
-r10 * y0 / 12
|
||||||
|
- r10 * y1 / 4
|
||||||
|
- r2 * r5 / 12
|
||||||
|
- r4 * r6 * x1 / 12
|
||||||
|
+ x0**3 * (3 * y0 + y1) / 12
|
||||||
|
)
|
||||||
|
self.momentXY += (
|
||||||
|
-r2 * r8 / 24
|
||||||
|
- r2 * r9 / 8
|
||||||
|
- r3 * r7 / 24
|
||||||
|
+ r6 * (r7 * y1 + 3 * r8 + r9) / 24
|
||||||
|
- x0 * x1 * (r8 - r9) / 12
|
||||||
|
)
|
||||||
|
self.momentYY += (
|
||||||
|
-r0 * r9 / 12
|
||||||
|
- r1 * r8 / 12
|
||||||
|
- r11 * x1 / 12
|
||||||
|
- r12 * x1 / 12
|
||||||
|
+ x0 * (r11 + r12 + r8 * y1 + r9 * y0) / 12
|
||||||
|
)
|
||||||
|
|
||||||
@cython.locals(r0=cython.double)
|
@cython.locals(r0=cython.double)
|
||||||
@cython.locals(r1=cython.double)
|
@cython.locals(r1=cython.double)
|
||||||
@ -200,12 +219,99 @@ class MomentsPen(BasePen):
|
|||||||
r52 = 10 * y1
|
r52 = 10 * y1
|
||||||
r53 = 12 * y1
|
r53 = 12 * y1
|
||||||
|
|
||||||
self.area += -r1/6 - r3/6 + x0*(r0 + r5 + y2)/6 + x1*y2/3 - y0*(r4 + x2)/6
|
self.area += (
|
||||||
self.momentX += -r11*(-r10 + y1)/30 + r12*(r13 + r8 + y2)/30 + r6*y2/15 - r7*r8/30 - r7*r9/30 + x0*(r14 - r15 - r16*y0 + r17)/30 - y0*(r11 + 2*r6 + r7)/30
|
-r1 / 6
|
||||||
self.momentY += -r18/30 - r20*x2/30 - r23/30 - r24*(r16 + x2)/30 + x0*(r0*y2 + r20 + r21 + r25 + r26 + r8*y0)/30 + x1*y2*(r10 + y1)/15 - y0*(r1 + r17)/30
|
- r3 / 6
|
||||||
self.momentXX += r12*(r1 - 5*r15 - r34*y0 + r36 + r9*x1)/420 + 2*r27*y2/105 - r28*r29/420 - r28*y2/4 - r31*(r0 - 3*y2)/420 - r6*x2*(r0 - r32)/105 + x0**3*(r30 + 21*y0 + y2)/84 - x0*(r0*r7 + r15*r37 - r2*r37 - r33*y2 + r38*y0 - r39 - r40 + r5*r7)/420 - y0*(8*r27 + 5*r28 + r31 + r33*x2)/420
|
+ x0 * (r0 + r5 + y2) / 6
|
||||||
self.momentXY += r12*(r13*y2 + 3*r21 + 105*r24 + r41*y0 + r42 + r46*y1)/840 - r16*x2*(r43 - r44)/840 - r21*r7/8 - r24*(r38 + r45*x1 + 3*r7)/840 - r41*r7*y2/840 - r42*r7/840 + r6*y2*(r32 + r8)/210 + x0*(-r15*r8 + r16*r25 + r18 + r21*r47 - r24*r34 - r26*x2 + r35*r46 + r48)/420 - y0*(r16*r2 + r30*r7 + r35*r45 + r39 + r40)/420
|
+ x1 * y2 / 3
|
||||||
self.momentYY += -r2*r42/420 - r22*r29/420 - r24*(r14 + r36 + r52*x2)/420 - r49*x2/420 - r50*x2/12 - r51*(r47 + x2)/84 + x0*(r19*r46 + r21*r5 + r21*r52 + r24*r29 + r25*r53 + r26*y2 + r42*y0 + r49 + 5*r50 + 35*r51)/420 + x1*y2*(r43 + r44 + r9*y1)/210 - y0*(r19*r45 + r2*r53 - r21*r4 + r48)/420
|
- y0 * (r4 + x2) / 6
|
||||||
|
)
|
||||||
|
self.momentX += (
|
||||||
|
-r11 * (-r10 + y1) / 30
|
||||||
|
+ r12 * (r13 + r8 + y2) / 30
|
||||||
|
+ r6 * y2 / 15
|
||||||
|
- r7 * r8 / 30
|
||||||
|
- r7 * r9 / 30
|
||||||
|
+ x0 * (r14 - r15 - r16 * y0 + r17) / 30
|
||||||
|
- y0 * (r11 + 2 * r6 + r7) / 30
|
||||||
|
)
|
||||||
|
self.momentY += (
|
||||||
|
-r18 / 30
|
||||||
|
- r20 * x2 / 30
|
||||||
|
- r23 / 30
|
||||||
|
- r24 * (r16 + x2) / 30
|
||||||
|
+ x0 * (r0 * y2 + r20 + r21 + r25 + r26 + r8 * y0) / 30
|
||||||
|
+ x1 * y2 * (r10 + y1) / 15
|
||||||
|
- y0 * (r1 + r17) / 30
|
||||||
|
)
|
||||||
|
self.momentXX += (
|
||||||
|
r12 * (r1 - 5 * r15 - r34 * y0 + r36 + r9 * x1) / 420
|
||||||
|
+ 2 * r27 * y2 / 105
|
||||||
|
- r28 * r29 / 420
|
||||||
|
- r28 * y2 / 4
|
||||||
|
- r31 * (r0 - 3 * y2) / 420
|
||||||
|
- r6 * x2 * (r0 - r32) / 105
|
||||||
|
+ x0**3 * (r30 + 21 * y0 + y2) / 84
|
||||||
|
- x0
|
||||||
|
* (
|
||||||
|
r0 * r7
|
||||||
|
+ r15 * r37
|
||||||
|
- r2 * r37
|
||||||
|
- r33 * y2
|
||||||
|
+ r38 * y0
|
||||||
|
- r39
|
||||||
|
- r40
|
||||||
|
+ r5 * r7
|
||||||
|
)
|
||||||
|
/ 420
|
||||||
|
- y0 * (8 * r27 + 5 * r28 + r31 + r33 * x2) / 420
|
||||||
|
)
|
||||||
|
self.momentXY += (
|
||||||
|
r12 * (r13 * y2 + 3 * r21 + 105 * r24 + r41 * y0 + r42 + r46 * y1) / 840
|
||||||
|
- r16 * x2 * (r43 - r44) / 840
|
||||||
|
- r21 * r7 / 8
|
||||||
|
- r24 * (r38 + r45 * x1 + 3 * r7) / 840
|
||||||
|
- r41 * r7 * y2 / 840
|
||||||
|
- r42 * r7 / 840
|
||||||
|
+ r6 * y2 * (r32 + r8) / 210
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
-r15 * r8
|
||||||
|
+ r16 * r25
|
||||||
|
+ r18
|
||||||
|
+ r21 * r47
|
||||||
|
- r24 * r34
|
||||||
|
- r26 * x2
|
||||||
|
+ r35 * r46
|
||||||
|
+ r48
|
||||||
|
)
|
||||||
|
/ 420
|
||||||
|
- y0 * (r16 * r2 + r30 * r7 + r35 * r45 + r39 + r40) / 420
|
||||||
|
)
|
||||||
|
self.momentYY += (
|
||||||
|
-r2 * r42 / 420
|
||||||
|
- r22 * r29 / 420
|
||||||
|
- r24 * (r14 + r36 + r52 * x2) / 420
|
||||||
|
- r49 * x2 / 420
|
||||||
|
- r50 * x2 / 12
|
||||||
|
- r51 * (r47 + x2) / 84
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
r19 * r46
|
||||||
|
+ r21 * r5
|
||||||
|
+ r21 * r52
|
||||||
|
+ r24 * r29
|
||||||
|
+ r25 * r53
|
||||||
|
+ r26 * y2
|
||||||
|
+ r42 * y0
|
||||||
|
+ r49
|
||||||
|
+ 5 * r50
|
||||||
|
+ 35 * r51
|
||||||
|
)
|
||||||
|
/ 420
|
||||||
|
+ x1 * y2 * (r43 + r44 + r9 * y1) / 210
|
||||||
|
- y0 * (r19 * r45 + r2 * r53 - r21 * r4 + r48) / 420
|
||||||
|
)
|
||||||
|
|
||||||
@cython.locals(r0=cython.double)
|
@cython.locals(r0=cython.double)
|
||||||
@cython.locals(r1=cython.double)
|
@cython.locals(r1=cython.double)
|
||||||
@ -484,20 +590,296 @@ class MomentsPen(BasePen):
|
|||||||
r131 = 189 * r53
|
r131 = 189 * r53
|
||||||
r132 = 90 * y2
|
r132 = 90 * y2
|
||||||
|
|
||||||
self.area += -r1/20 - r3/20 - r4*(x2 + x3)/20 + x0*(r7 + r8 + 10*y0 + y3)/20 + 3*x1*(y2 + y3)/20 + 3*x2*y3/10 - y0*(r5 + r6 + x3)/20
|
self.area += (
|
||||||
self.momentX += r11/840 - r13/8 - r14/3 - r17*(-r15 + r8)/840 + r19*(r8 + 2*y3)/840 + r20*(r0 + r21 + 56*y0 + y3)/168 + r29*(-r23 + r25 + r28)/840 - r4*(10*r12 + r17 + r22)/840 + x0*(12*r27 + r30*y2 + r34 - r35*x1 - r37 - r38*y0 + r39*x1 - r4*x3 + r45)/840 - y0*(r17 + r30*x2 + r31*x1 + r32 + r33 + 18*r9)/840
|
-r1 / 20
|
||||||
self.momentY += -r4*(r25 + r58)/840 - r47/8 - r50/840 - r52/6 - r54*(r6 + 2*x3)/840 - r55*(r56 + r57 + x3)/168 + x0*(r35*y1 + r40*y0 + r44*y2 + 18*r48 + 140*r55 + r59 + r63 + 12*r64 + r65 + r66)/840 + x1*(r24*y1 + 10*r51 + r59 + r60 + r7*y3)/280 + x2*y3*(r15 + r8)/56 - y0*(r16*y1 + r31*y2 + r44*x2 + r45 + r61 - r62*x1)/840
|
- r3 / 20
|
||||||
self.momentXX += -r12*r72*(-r40 + r8)/9240 + 3*r18*(r28 + r34 - r38*y1 + r75)/3080 + r20*(r24*x3 - r72*y0 - r76*y0 - r77*y0 + r78 + r79*y3 + r80*y1 + 210*r81 + r84)/9240 - r29*(r12*r21 + 14*r13 + r44*r9 - r73*y3 + 54*r86 - 84*r87 - r89 - r90)/9240 - r4*(70*r12*x2 + 27*r67 + 42*r68 + r74)/9240 + 3*r67*y3/220 - r68*r69/9240 - r68*y3/4 - r70*r9*(-r62 + y2)/9240 + 3*r71*(r24 + r40)/3080 + x0**3*(r24 + r44 + 165*y0 + y3)/660 + x0*(r100*r27 + 162*r101 + r102 + r11 + 63*r18*y3 + r27*r91 - r33*y0 - r37*x3 + r43*x3 - r73*y0 - r88*y1 + r92*y2 - r93*y0 - 9*r94 - r95*y0 - r96*y0 - r97*y1 - 18*r98 + r99*x1*y3)/9240 - y0*(r12*r56 + r12*r80 + r32*x3 + 45*r67 + 14*r68 + 126*r71 + r74 + r85*r91 + 135*r9*x1 + r92*x2)/9240
|
- r4 * (x2 + x3) / 20
|
||||||
self.momentXY += -r103*r12/18480 - r12*r51/8 - 3*r14*y2/44 + 3*r18*(r105 + r2*y1 + 18*r46 + 15*r48 + 7*r51)/6160 + r20*(1260*r106 + r107*y1 + r108 + 28*r109 + r110 + r111 + r112 + 30*r46 + 2310*r55 + r66)/18480 - r54*(7*r12 + 18*r85 + 15*r9)/18480 - r55*(r33 + r73 + r93 + r95 + r96 + r97)/18480 - r7*(42*r13 + r82*x3 + 28*r87 + r89 + r90)/18480 - 3*r85*(r48 - r66)/220 + 3*r9*y3*(r62 + 2*y2)/440 + x0*(-r1*y0 - 84*r106*x2 + r109*r56 + 54*r114 + r117*y1 + 15*r118 + 21*r119 + 81*r120 + r121*r46 + 54*r122 + 60*r123 + r124 - r21*x3*y0 + r23*y3 - r54*x3 - r55*r72 - r55*r76 - r55*r77 + r57*y0*y3 + r60*x3 + 84*r81*y0 + 189*r81*y1)/9240 + x1*(r104*r27 - r105*x3 - r113*r53 + 63*r114 + r115 - r16*r53 + 28*r47 + r51*r80)/3080 - y0*(54*r101 + r102 + r116*r5 + r117*x3 + 21*r13 - r19*y3 + r22*y3 + r78*x3 + 189*r83*x2 + 60*r86 + 81*r9*y1 + 15*r94 + 54*r98)/9240
|
+ x0 * (r7 + r8 + 10 * y0 + y3) / 20
|
||||||
self.momentYY += -r103*r116/9240 - r125*r70/9240 - r126*x3/12 - 3*r127*(r26 + r38)/3080 - r128*(r26 + r30 + x3)/660 - r4*(r112*x3 + r115 - 14*r119 + 84*r47)/9240 - r52*r69/9240 - r54*(r58 + r61 + r75)/9240 - r55*(r100*y1 + r121*y2 + r26*y3 + r79*y2 + r84 + 210*x2*y1)/9240 + x0*(r108*y1 + r110*y0 + r111*y0 + r112*y0 + 45*r125 + 14*r126 + 126*r127 + 770*r128 + 42*r129 + r130 + r131*y2 + r132*r64 + 135*r48*y1 + 630*r55*y1 + 126*r55*y2 + 14*r55*y3 + r63*y3 + r65*y3 + r66*y0)/9240 + x1*(27*r125 + 42*r126 + 70*r129 + r130 + r39*r53 + r44*r48 + 27*r53*y2 + 54*r64*y2)/3080 + 3*x2*y3*(r48 + r66 + r8*y3)/220 - y0*(r100*r46 + 18*r114 - 9*r118 - 27*r120 - 18*r122 - 30*r123 + r124 + r131*x2 + r132*x3*y1 + 162*r42*y1 + r50 + 63*r53*x3 + r64*r99)/9240
|
+ 3 * x1 * (y2 + y3) / 20
|
||||||
|
+ 3 * x2 * y3 / 10
|
||||||
|
- y0 * (r5 + r6 + x3) / 20
|
||||||
|
)
|
||||||
|
self.momentX += (
|
||||||
|
r11 / 840
|
||||||
|
- r13 / 8
|
||||||
|
- r14 / 3
|
||||||
|
- r17 * (-r15 + r8) / 840
|
||||||
|
+ r19 * (r8 + 2 * y3) / 840
|
||||||
|
+ r20 * (r0 + r21 + 56 * y0 + y3) / 168
|
||||||
|
+ r29 * (-r23 + r25 + r28) / 840
|
||||||
|
- r4 * (10 * r12 + r17 + r22) / 840
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
12 * r27
|
||||||
|
+ r30 * y2
|
||||||
|
+ r34
|
||||||
|
- r35 * x1
|
||||||
|
- r37
|
||||||
|
- r38 * y0
|
||||||
|
+ r39 * x1
|
||||||
|
- r4 * x3
|
||||||
|
+ r45
|
||||||
|
)
|
||||||
|
/ 840
|
||||||
|
- y0 * (r17 + r30 * x2 + r31 * x1 + r32 + r33 + 18 * r9) / 840
|
||||||
|
)
|
||||||
|
self.momentY += (
|
||||||
|
-r4 * (r25 + r58) / 840
|
||||||
|
- r47 / 8
|
||||||
|
- r50 / 840
|
||||||
|
- r52 / 6
|
||||||
|
- r54 * (r6 + 2 * x3) / 840
|
||||||
|
- r55 * (r56 + r57 + x3) / 168
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
r35 * y1
|
||||||
|
+ r40 * y0
|
||||||
|
+ r44 * y2
|
||||||
|
+ 18 * r48
|
||||||
|
+ 140 * r55
|
||||||
|
+ r59
|
||||||
|
+ r63
|
||||||
|
+ 12 * r64
|
||||||
|
+ r65
|
||||||
|
+ r66
|
||||||
|
)
|
||||||
|
/ 840
|
||||||
|
+ x1 * (r24 * y1 + 10 * r51 + r59 + r60 + r7 * y3) / 280
|
||||||
|
+ x2 * y3 * (r15 + r8) / 56
|
||||||
|
- y0 * (r16 * y1 + r31 * y2 + r44 * x2 + r45 + r61 - r62 * x1) / 840
|
||||||
|
)
|
||||||
|
self.momentXX += (
|
||||||
|
-r12 * r72 * (-r40 + r8) / 9240
|
||||||
|
+ 3 * r18 * (r28 + r34 - r38 * y1 + r75) / 3080
|
||||||
|
+ r20
|
||||||
|
* (
|
||||||
|
r24 * x3
|
||||||
|
- r72 * y0
|
||||||
|
- r76 * y0
|
||||||
|
- r77 * y0
|
||||||
|
+ r78
|
||||||
|
+ r79 * y3
|
||||||
|
+ r80 * y1
|
||||||
|
+ 210 * r81
|
||||||
|
+ r84
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
- r29
|
||||||
|
* (
|
||||||
|
r12 * r21
|
||||||
|
+ 14 * r13
|
||||||
|
+ r44 * r9
|
||||||
|
- r73 * y3
|
||||||
|
+ 54 * r86
|
||||||
|
- 84 * r87
|
||||||
|
- r89
|
||||||
|
- r90
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
- r4 * (70 * r12 * x2 + 27 * r67 + 42 * r68 + r74) / 9240
|
||||||
|
+ 3 * r67 * y3 / 220
|
||||||
|
- r68 * r69 / 9240
|
||||||
|
- r68 * y3 / 4
|
||||||
|
- r70 * r9 * (-r62 + y2) / 9240
|
||||||
|
+ 3 * r71 * (r24 + r40) / 3080
|
||||||
|
+ x0**3 * (r24 + r44 + 165 * y0 + y3) / 660
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
r100 * r27
|
||||||
|
+ 162 * r101
|
||||||
|
+ r102
|
||||||
|
+ r11
|
||||||
|
+ 63 * r18 * y3
|
||||||
|
+ r27 * r91
|
||||||
|
- r33 * y0
|
||||||
|
- r37 * x3
|
||||||
|
+ r43 * x3
|
||||||
|
- r73 * y0
|
||||||
|
- r88 * y1
|
||||||
|
+ r92 * y2
|
||||||
|
- r93 * y0
|
||||||
|
- 9 * r94
|
||||||
|
- r95 * y0
|
||||||
|
- r96 * y0
|
||||||
|
- r97 * y1
|
||||||
|
- 18 * r98
|
||||||
|
+ r99 * x1 * y3
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
- y0
|
||||||
|
* (
|
||||||
|
r12 * r56
|
||||||
|
+ r12 * r80
|
||||||
|
+ r32 * x3
|
||||||
|
+ 45 * r67
|
||||||
|
+ 14 * r68
|
||||||
|
+ 126 * r71
|
||||||
|
+ r74
|
||||||
|
+ r85 * r91
|
||||||
|
+ 135 * r9 * x1
|
||||||
|
+ r92 * x2
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
)
|
||||||
|
self.momentXY += (
|
||||||
|
-r103 * r12 / 18480
|
||||||
|
- r12 * r51 / 8
|
||||||
|
- 3 * r14 * y2 / 44
|
||||||
|
+ 3 * r18 * (r105 + r2 * y1 + 18 * r46 + 15 * r48 + 7 * r51) / 6160
|
||||||
|
+ r20
|
||||||
|
* (
|
||||||
|
1260 * r106
|
||||||
|
+ r107 * y1
|
||||||
|
+ r108
|
||||||
|
+ 28 * r109
|
||||||
|
+ r110
|
||||||
|
+ r111
|
||||||
|
+ r112
|
||||||
|
+ 30 * r46
|
||||||
|
+ 2310 * r55
|
||||||
|
+ r66
|
||||||
|
)
|
||||||
|
/ 18480
|
||||||
|
- r54 * (7 * r12 + 18 * r85 + 15 * r9) / 18480
|
||||||
|
- r55 * (r33 + r73 + r93 + r95 + r96 + r97) / 18480
|
||||||
|
- r7 * (42 * r13 + r82 * x3 + 28 * r87 + r89 + r90) / 18480
|
||||||
|
- 3 * r85 * (r48 - r66) / 220
|
||||||
|
+ 3 * r9 * y3 * (r62 + 2 * y2) / 440
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
-r1 * y0
|
||||||
|
- 84 * r106 * x2
|
||||||
|
+ r109 * r56
|
||||||
|
+ 54 * r114
|
||||||
|
+ r117 * y1
|
||||||
|
+ 15 * r118
|
||||||
|
+ 21 * r119
|
||||||
|
+ 81 * r120
|
||||||
|
+ r121 * r46
|
||||||
|
+ 54 * r122
|
||||||
|
+ 60 * r123
|
||||||
|
+ r124
|
||||||
|
- r21 * x3 * y0
|
||||||
|
+ r23 * y3
|
||||||
|
- r54 * x3
|
||||||
|
- r55 * r72
|
||||||
|
- r55 * r76
|
||||||
|
- r55 * r77
|
||||||
|
+ r57 * y0 * y3
|
||||||
|
+ r60 * x3
|
||||||
|
+ 84 * r81 * y0
|
||||||
|
+ 189 * r81 * y1
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
+ x1
|
||||||
|
* (
|
||||||
|
r104 * r27
|
||||||
|
- r105 * x3
|
||||||
|
- r113 * r53
|
||||||
|
+ 63 * r114
|
||||||
|
+ r115
|
||||||
|
- r16 * r53
|
||||||
|
+ 28 * r47
|
||||||
|
+ r51 * r80
|
||||||
|
)
|
||||||
|
/ 3080
|
||||||
|
- y0
|
||||||
|
* (
|
||||||
|
54 * r101
|
||||||
|
+ r102
|
||||||
|
+ r116 * r5
|
||||||
|
+ r117 * x3
|
||||||
|
+ 21 * r13
|
||||||
|
- r19 * y3
|
||||||
|
+ r22 * y3
|
||||||
|
+ r78 * x3
|
||||||
|
+ 189 * r83 * x2
|
||||||
|
+ 60 * r86
|
||||||
|
+ 81 * r9 * y1
|
||||||
|
+ 15 * r94
|
||||||
|
+ 54 * r98
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
)
|
||||||
|
self.momentYY += (
|
||||||
|
-r103 * r116 / 9240
|
||||||
|
- r125 * r70 / 9240
|
||||||
|
- r126 * x3 / 12
|
||||||
|
- 3 * r127 * (r26 + r38) / 3080
|
||||||
|
- r128 * (r26 + r30 + x3) / 660
|
||||||
|
- r4 * (r112 * x3 + r115 - 14 * r119 + 84 * r47) / 9240
|
||||||
|
- r52 * r69 / 9240
|
||||||
|
- r54 * (r58 + r61 + r75) / 9240
|
||||||
|
- r55
|
||||||
|
* (r100 * y1 + r121 * y2 + r26 * y3 + r79 * y2 + r84 + 210 * x2 * y1)
|
||||||
|
/ 9240
|
||||||
|
+ x0
|
||||||
|
* (
|
||||||
|
r108 * y1
|
||||||
|
+ r110 * y0
|
||||||
|
+ r111 * y0
|
||||||
|
+ r112 * y0
|
||||||
|
+ 45 * r125
|
||||||
|
+ 14 * r126
|
||||||
|
+ 126 * r127
|
||||||
|
+ 770 * r128
|
||||||
|
+ 42 * r129
|
||||||
|
+ r130
|
||||||
|
+ r131 * y2
|
||||||
|
+ r132 * r64
|
||||||
|
+ 135 * r48 * y1
|
||||||
|
+ 630 * r55 * y1
|
||||||
|
+ 126 * r55 * y2
|
||||||
|
+ 14 * r55 * y3
|
||||||
|
+ r63 * y3
|
||||||
|
+ r65 * y3
|
||||||
|
+ r66 * y0
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
+ x1
|
||||||
|
* (
|
||||||
|
27 * r125
|
||||||
|
+ 42 * r126
|
||||||
|
+ 70 * r129
|
||||||
|
+ r130
|
||||||
|
+ r39 * r53
|
||||||
|
+ r44 * r48
|
||||||
|
+ 27 * r53 * y2
|
||||||
|
+ 54 * r64 * y2
|
||||||
|
)
|
||||||
|
/ 3080
|
||||||
|
+ 3 * x2 * y3 * (r48 + r66 + r8 * y3) / 220
|
||||||
|
- y0
|
||||||
|
* (
|
||||||
|
r100 * r46
|
||||||
|
+ 18 * r114
|
||||||
|
- 9 * r118
|
||||||
|
- 27 * r120
|
||||||
|
- 18 * r122
|
||||||
|
- 30 * r123
|
||||||
|
+ r124
|
||||||
|
+ r131 * x2
|
||||||
|
+ r132 * x3 * y1
|
||||||
|
+ 162 * r42 * y1
|
||||||
|
+ r50
|
||||||
|
+ 63 * r53 * x3
|
||||||
|
+ r64 * r99
|
||||||
|
)
|
||||||
|
/ 9240
|
||||||
|
)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
|
if __name__ == "__main__":
|
||||||
from fontTools.misc.symfont import x, y, printGreenPen
|
from fontTools.misc.symfont import x, y, printGreenPen
|
||||||
printGreenPen('MomentsPen', [
|
|
||||||
('area', 1),
|
printGreenPen(
|
||||||
('momentX', x),
|
"MomentsPen",
|
||||||
('momentY', y),
|
[
|
||||||
('momentXX', x**2),
|
("area", 1),
|
||||||
('momentXY', x*y),
|
("momentX", x),
|
||||||
('momentYY', y**2),
|
("momentY", y),
|
||||||
])
|
("momentXX", x**2),
|
||||||
|
("momentXY", x * y),
|
||||||
|
("momentYY", y**2),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
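MomentsPen itself only gains line breaks: the long Green-theorem polynomials are split across lines, and the accumulated attributes keep their meaning. A short sketch of reading area and centroid from it, assuming the standard fontTools.pens.momentsPen module path:

    from fontTools.pens.momentsPen import MomentsPen

    pen = MomentsPen()
    pen.moveTo((0, 0))              # contours must be closed, otherwise OpenContourError
    pen.lineTo((100, 0))
    pen.lineTo((100, 100))
    pen.lineTo((0, 100))
    pen.closePath()
    area = pen.area                  # signed: positive here (counter-clockwise contour)
    print(area, pen.momentX / area, pen.momentY / area)   # 10000.0 50.0 50.0
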
@ -2,7 +2,12 @@
|
|||||||
"""Calculate the perimeter of a glyph."""
|
"""Calculate the perimeter of a glyph."""
|
||||||
|
|
||||||
from fontTools.pens.basePen import BasePen
|
from fontTools.pens.basePen import BasePen
|
||||||
from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC
|
from fontTools.misc.bezierTools import (
|
||||||
|
approximateQuadraticArcLengthC,
|
||||||
|
calcQuadraticArcLengthC,
|
||||||
|
approximateCubicArcLengthC,
|
||||||
|
calcCubicArcLengthC,
|
||||||
|
)
|
||||||
import math
|
import math
|
||||||
|
|
||||||
|
|
||||||
@ -12,8 +17,8 @@ __all__ = ["PerimeterPen"]
|
|||||||
def _distance(p0, p1):
|
def _distance(p0, p1):
|
||||||
return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
|
return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
|
||||||
|
|
||||||
class PerimeterPen(BasePen):
|
|
||||||
|
|
||||||
|
class PerimeterPen(BasePen):
|
||||||
def __init__(self, glyphset=None, tolerance=0.005):
|
def __init__(self, glyphset=None, tolerance=0.005):
|
||||||
BasePen.__init__(self, glyphset)
|
BasePen.__init__(self, glyphset)
|
||||||
self.value = 0
|
self.value = 0
|
||||||
@ -22,8 +27,14 @@ class PerimeterPen(BasePen):
|
|||||||
# Choose which algorithm to use for quadratic and for cubic.
|
# Choose which algorithm to use for quadratic and for cubic.
|
||||||
# Quadrature is faster but has fixed error characteristic with no strong
|
# Quadrature is faster but has fixed error characteristic with no strong
|
||||||
# error bound. The cutoff points are derived empirically.
|
# error bound. The cutoff points are derived empirically.
|
||||||
self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
|
self._addCubic = (
|
||||||
self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact
|
self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
|
||||||
|
)
|
||||||
|
self._addQuadratic = (
|
||||||
|
self._addQuadraticQuadrature
|
||||||
|
if tolerance >= 0.00075
|
||||||
|
else self._addQuadraticExact
|
||||||
|
)
|
||||||
|
|
||||||
def _moveTo(self, p0):
|
def _moveTo(self, p0):
|
||||||
self.__startPoint = p0
|
self.__startPoint = p0
|
||||||
|
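PerimeterPen only gains line breaks around the tolerance-based algorithm selection. A usage sketch, assuming the standard fontTools.pens.perimeterPen module path:

    from fontTools.pens.perimeterPen import PerimeterPen

    pen = PerimeterPen(tolerance=0.005)   # tighter tolerances select the exact/recursive arc length
    pen.moveTo((0, 0))
    pen.lineTo((300, 400))
    pen.lineTo((0, 400))
    pen.closePath()                        # the implied closing line is counted too
    print(pen.value)                       # 1200.0  (500 + 300 + 400)
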
@ -119,7 +119,7 @@ class PointInsidePen(BasePen):
|
|||||||
by = (y3 - y2) * 3.0 - cy
|
by = (y3 - y2) * 3.0 - cy
|
||||||
ay = y4 - dy - cy - by
|
ay = y4 - dy - cy - by
|
||||||
solutions = sorted(solveCubic(ay, by, cy, dy - y))
|
solutions = sorted(solveCubic(ay, by, cy, dy - y))
|
||||||
solutions = [t for t in solutions if -0. <= t <= 1.]
|
solutions = [t for t in solutions if -0.0 <= t <= 1.0]
|
||||||
if not solutions:
|
if not solutions:
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -175,7 +175,9 @@ class PointInsidePen(BasePen):
|
|||||||
b = (y2 - c) * 2.0
|
b = (y2 - c) * 2.0
|
||||||
a = y3 - c - b
|
a = y3 - c - b
|
||||||
solutions = sorted(solveQuadratic(a, b, c - y))
|
solutions = sorted(solveQuadratic(a, b, c - y))
|
||||||
solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
|
solutions = [
|
||||||
|
t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
|
||||||
|
]
|
||||||
if not solutions:
|
if not solutions:
|
||||||
return
|
return
|
||||||
# XXX
|
# XXX
|
||||||
|
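The PointInsidePen hunks only reflow two list comprehensions over the cubic and quadratic solutions. A usage sketch, assuming a constructor of the form (glyphSet, testPoint, evenOdd=False) and a getResult() method, neither of which is shown in this diff:

    from fontTools.pens.pointInsidePen import PointInsidePen

    pen = PointInsidePen(None, (50, 50))
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.lineTo((100, 100))
    pen.lineTo((0, 100))
    pen.closePath()
    print(pen.getResult())   # True: (50, 50) lies inside the square
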
@ -45,7 +45,7 @@ class AbstractPointPen:
|
|||||||
smooth: bool = False,
|
smooth: bool = False,
|
||||||
name: Optional[str] = None,
|
name: Optional[str] = None,
|
||||||
identifier: Optional[str] = None,
|
identifier: Optional[str] = None,
|
||||||
**kwargs: Any
|
**kwargs: Any,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Add a point to the current sub path."""
|
"""Add a point to the current sub path."""
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
@ -55,7 +55,7 @@ class AbstractPointPen:
|
|||||||
baseGlyphName: str,
|
baseGlyphName: str,
|
||||||
transformation: Tuple[float, float, float, float, float, float],
|
transformation: Tuple[float, float, float, float, float, float],
|
||||||
identifier: Optional[str] = None,
|
identifier: Optional[str] = None,
|
||||||
**kwargs: Any
|
**kwargs: Any,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Add a sub glyph."""
|
"""Add a sub glyph."""
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
@ -154,8 +154,9 @@ class BasePointToSegmentPen(AbstractPointPen):
|
|||||||
|
|
||||||
self._flushContour(segments)
|
self._flushContour(segments)
|
||||||
|
|
||||||
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
|
def addPoint(
|
||||||
identifier=None, **kwargs):
|
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
|
||||||
|
):
|
||||||
if self.currentPath is None:
|
if self.currentPath is None:
|
||||||
raise PenError("Path not begun")
|
raise PenError("Path not begun")
|
||||||
self.currentPath.append((pt, segmentType, smooth, name, kwargs))
|
self.currentPath.append((pt, segmentType, smooth, name, kwargs))
|
||||||
@ -388,8 +389,9 @@ class GuessSmoothPointPen(AbstractPointPen):
|
|||||||
self._outPen.endPath()
|
self._outPen.endPath()
|
||||||
self._points = None
|
self._points = None
|
||||||
|
|
||||||
def addPoint(self, pt, segmentType=None, smooth=False, name=None,
|
def addPoint(
|
||||||
identifier=None, **kwargs):
|
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
|
||||||
|
):
|
||||||
if self._points is None:
|
if self._points is None:
|
||||||
raise PenError("Path not begun")
|
raise PenError("Path not begun")
|
||||||
if identifier is not None:
|
if identifier is not None:
|
||||||
@ -464,7 +466,9 @@ class ReverseContourPointPen(AbstractPointPen):
|
|||||||
lastSegmentType = nextSegmentType
|
lastSegmentType = nextSegmentType
|
||||||
else:
|
else:
|
||||||
segmentType = None
|
segmentType = None
|
||||||
pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs)
|
pen.addPoint(
|
||||||
|
pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
|
||||||
|
)
|
||||||
pen.endPath()
|
pen.endPath()
|
||||||
|
|
||||||
def beginPath(self, identifier=None, **kwargs):
|
def beginPath(self, identifier=None, **kwargs):
|
||||||
@ -480,7 +484,9 @@ class ReverseContourPointPen(AbstractPointPen):
|
|||||||
self._flushContour()
|
self._flushContour()
|
||||||
self.currentContour = None
|
self.currentContour = None
|
||||||
|
|
||||||
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
|
def addPoint(
|
||||||
|
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
|
||||||
|
):
|
||||||
if self.currentContour is None:
|
if self.currentContour is None:
|
||||||
raise PenError("Path not begun")
|
raise PenError("Path not begun")
|
||||||
if identifier is not None:
|
if identifier is not None:
|
||||||
|
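The pointPen changes are again formatting only: trailing commas after **kwargs and re-wrapped addPoint signatures. A sketch of the point-pen protocol those signatures serve, assuming the standard fontTools.pens.pointPen and fontTools.pens.recordingPen module paths:

    from fontTools.pens.pointPen import PointToSegmentPen
    from fontTools.pens.recordingPen import RecordingPen

    out = RecordingPen()
    pen = PointToSegmentPen(out)
    pen.beginPath()
    pen.addPoint((0, 0), segmentType="line")
    pen.addPoint((100, 0), segmentType="line")
    pen.addPoint((100, 100), segmentType="line")
    pen.endPath()
    print(out.value)   # the closed contour re-expressed as moveTo/lineTo/closePath calls
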
@ -5,11 +5,11 @@ __all__ = ["QtPen"]
|
|||||||
|
|
||||||
|
|
||||||
class QtPen(BasePen):
|
class QtPen(BasePen):
|
||||||
|
|
||||||
def __init__(self, glyphSet, path=None):
|
def __init__(self, glyphSet, path=None):
|
||||||
BasePen.__init__(self, glyphSet)
|
BasePen.__init__(self, glyphSet)
|
||||||
if path is None:
|
if path is None:
|
||||||
from PyQt5.QtGui import QPainterPath
|
from PyQt5.QtGui import QPainterPath
|
||||||
|
|
||||||
path = QPainterPath()
|
path = QPainterPath()
|
||||||
self.path = path
|
self.path = path
|
||||||
|
|
||||||
|
@ -42,4 +42,3 @@ class QuartzPen(BasePen):
|
|||||||
|
|
||||||
def _closePath(self):
|
def _closePath(self):
|
||||||
CGPathCloseSubpath(self.path)
|
CGPathCloseSubpath(self.path)
|
||||||
|
|
||||||
|
@ -48,20 +48,28 @@ class RecordingPen(AbstractPen):
|
|||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.value = []
|
self.value = []
|
||||||
|
|
||||||
def moveTo(self, p0):
|
def moveTo(self, p0):
|
||||||
self.value.append(('moveTo', (p0,)))
|
self.value.append(("moveTo", (p0,)))
|
||||||
|
|
||||||
def lineTo(self, p1):
|
def lineTo(self, p1):
|
||||||
self.value.append(('lineTo', (p1,)))
|
self.value.append(("lineTo", (p1,)))
|
||||||
|
|
||||||
def qCurveTo(self, *points):
|
def qCurveTo(self, *points):
|
||||||
self.value.append(('qCurveTo', points))
|
self.value.append(("qCurveTo", points))
|
||||||
|
|
||||||
def curveTo(self, *points):
|
def curveTo(self, *points):
|
||||||
self.value.append(('curveTo', points))
|
self.value.append(("curveTo", points))
|
||||||
|
|
||||||
def closePath(self):
|
def closePath(self):
|
||||||
self.value.append(('closePath', ()))
|
self.value.append(("closePath", ()))
|
||||||
|
|
||||||
def endPath(self):
|
def endPath(self):
|
||||||
self.value.append(('endPath', ()))
|
self.value.append(("endPath", ()))
|
||||||
|
|
||||||
def addComponent(self, glyphName, transformation):
|
def addComponent(self, glyphName, transformation):
|
||||||
self.value.append(('addComponent', (glyphName, transformation)))
|
self.value.append(("addComponent", (glyphName, transformation)))
|
||||||
|
|
||||||
def replay(self, pen):
|
def replay(self, pen):
|
||||||
replayRecording(self.value, pen)
|
replayRecording(self.value, pen)
|
||||||
|
|
||||||
@ -90,6 +98,7 @@ class DecomposingRecordingPen(DecomposingPen, RecordingPen):
|
|||||||
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
|
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
|
||||||
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
|
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# raises KeyError if base glyph is not found in glyphSet
|
# raises KeyError if base glyph is not found in glyphSet
|
||||||
skipMissingComponents = False
|
skipMissingComponents = False
|
||||||
|
|
||||||
@ -130,7 +139,9 @@ class RecordingPointPen(AbstractPointPen):
|
|||||||
def endPath(self):
|
def endPath(self):
|
||||||
self.value.append(("endPath", (), {}))
|
self.value.append(("endPath", (), {}))
|
||||||
|
|
||||||
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
|
def addPoint(
|
||||||
|
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
|
||||||
|
):
|
||||||
if identifier is not None:
|
if identifier is not None:
|
||||||
kwargs["identifier"] = identifier
|
kwargs["identifier"] = identifier
|
||||||
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
|
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
|
||||||
@ -152,4 +163,5 @@ if __name__ == "__main__":
|
|||||||
pen.curveTo((50, 75), (60, 50), (50, 25))
|
pen.curveTo((50, 75), (60, 50), (50, 25))
|
||||||
pen.closePath()
|
pen.closePath()
|
||||||
from pprint import pprint
|
from pprint import pprint
|
||||||
|
|
||||||
pprint(pen.value)
|
pprint(pen.value)
|
||||||
|
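RecordingPen's recorded entries simply change from single- to double-quoted operator names in the source. A sketch of recording and replaying, mirroring the __main__ block above:

    from fontTools.pens.recordingPen import RecordingPen
    from fontTools.pens.basePen import _TestPen

    rec = RecordingPen()
    rec.moveTo((0, 0))
    rec.lineTo((0, 100))
    rec.curveTo((50, 75), (60, 50), (50, 25))
    rec.closePath()
    print(rec.value)             # [('moveTo', ((0, 0),)), ('lineTo', ((0, 100),)), ...]
    rec.replay(_TestPen(None))   # replays the stored calls into any other pen
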
@ -35,11 +35,18 @@ class ReportLabPen(BasePen):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
if len(sys.argv) < 3:
|
if len(sys.argv) < 3:
|
||||||
print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
|
print(
|
||||||
print(" If no image file name is created, by default <glyphname>.png is created.")
|
"Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]"
|
||||||
|
)
|
||||||
|
print(
|
||||||
|
" If no image file name is created, by default <glyphname>.png is created."
|
||||||
|
)
|
||||||
print(" example: reportLabPen.py Arial.TTF R test.png")
|
print(" example: reportLabPen.py Arial.TTF R test.png")
|
||||||
print(" (The file format will be PNG, regardless of the image file name supplied)")
|
print(
|
||||||
|
" (The file format will be PNG, regardless of the image file name supplied)"
|
||||||
|
)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
from fontTools.ttLib import TTFont
|
from fontTools.ttLib import TTFont
|
||||||
@ -47,7 +54,7 @@ if __name__=="__main__":
|
|||||||
|
|
||||||
path = sys.argv[1]
|
path = sys.argv[1]
|
||||||
glyphName = sys.argv[2]
|
glyphName = sys.argv[2]
|
||||||
if (len(sys.argv) > 3):
|
if len(sys.argv) > 3:
|
||||||
imageFile = sys.argv[3]
|
imageFile = sys.argv[3]
|
||||||
else:
|
else:
|
||||||
imageFile = "%s.png" % glyphName
|
imageFile = "%s.png" % glyphName
|
||||||
|
@ -40,16 +40,14 @@ def reversedContour(contour, outputImpliedClosingLine=False):
|
|||||||
|
|
||||||
firstType, firstPts = contour.pop(0)
|
firstType, firstPts = contour.pop(0)
|
||||||
assert firstType in ("moveTo", "qCurveTo"), (
|
assert firstType in ("moveTo", "qCurveTo"), (
|
||||||
"invalid initial segment type: %r" % firstType)
|
"invalid initial segment type: %r" % firstType
|
||||||
|
)
|
||||||
firstOnCurve = firstPts[-1]
|
firstOnCurve = firstPts[-1]
|
||||||
if firstType == "qCurveTo":
|
if firstType == "qCurveTo":
|
||||||
# special case for TrueType paths containing only off-curve points
|
# special case for TrueType paths containing only off-curve points
|
||||||
assert firstOnCurve is None, (
|
assert firstOnCurve is None, "off-curve only paths must end with 'None'"
|
||||||
"off-curve only paths must end with 'None'")
|
assert not contour, "only one qCurveTo allowed per off-curve path"
|
||||||
assert not contour, (
|
firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)
|
||||||
"only one qCurveTo allowed per off-curve path")
|
|
||||||
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
|
|
||||||
(None,))
|
|
||||||
|
|
||||||
if not contour:
|
if not contour:
|
||||||
# contour contains only one segment, nothing to reverse
|
# contour contains only one segment, nothing to reverse
|
||||||
@ -67,8 +65,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
|
|||||||
if outputImpliedClosingLine or firstOnCurve != lastOnCurve:
|
if outputImpliedClosingLine or firstOnCurve != lastOnCurve:
|
||||||
# emit an implied line between the last and first points
|
# emit an implied line between the last and first points
|
||||||
yield "lineTo", (lastOnCurve,)
|
yield "lineTo", (lastOnCurve,)
|
||||||
contour[-1] = (lastType,
|
contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
|
||||||
tuple(lastPts[:-1]) + (firstOnCurve,))
|
|
||||||
|
|
||||||
if len(contour) > 1:
|
if len(contour) > 1:
|
||||||
secondType, secondPts = contour[0]
|
secondType, secondPts = contour[0]
|
||||||
@ -84,8 +81,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
|
|||||||
if secondType == "lineTo" and firstPts != secondPts:
|
if secondType == "lineTo" and firstPts != secondPts:
|
||||||
del contour[0]
|
del contour[0]
|
||||||
if contour:
|
if contour:
|
||||||
contour[-1] = (lastType,
|
contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
|
||||||
tuple(lastPts[:-1]) + secondPts)
|
|
||||||
else:
|
else:
|
||||||
# for open paths, the last point will become the first
|
# for open paths, the last point will become the first
|
||||||
yield firstType, (lastOnCurve,)
|
yield firstType, (lastOnCurve,)
|
||||||
@ -94,8 +90,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
|
|||||||
# we iterate over all segment pairs in reverse order, and yield
|
# we iterate over all segment pairs in reverse order, and yield
|
||||||
# each one with the off-curve points reversed (if any), and
|
# each one with the off-curve points reversed (if any), and
|
||||||
# with the on-curve point of the following segment
|
# with the on-curve point of the following segment
|
||||||
for (curType, curPts), (_, nextPts) in pairwise(
|
for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
|
||||||
contour, reverse=True):
|
|
||||||
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
|
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
|
||||||
|
|
||||||
yield "closePath" if closed else "endPath", ()
|
yield "closePath" if closed else "endPath", ()
|
||||||
|
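reversedContour() above is only re-wrapped; the reversal logic is untouched. A sketch of the pen that drives it, assuming the standard fontTools.pens.reverseContourPen module path:

    from fontTools.pens.reverseContourPen import ReverseContourPen
    from fontTools.pens.recordingPen import RecordingPen

    out = RecordingPen()
    pen = ReverseContourPen(out)
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.lineTo((100, 100))
    pen.closePath()          # the whole contour is buffered, reversed, then flushed
    print(out.value)         # the same triangle wound in the opposite direction
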
@ -53,8 +53,8 @@ class StatisticsPen(MomentsPen):
|
|||||||
self.varianceX = varianceX = self.momentXX / area - meanX**2
|
self.varianceX = varianceX = self.momentXX / area - meanX**2
|
||||||
self.varianceY = varianceY = self.momentYY / area - meanY**2
|
self.varianceY = varianceY = self.momentYY / area - meanY**2
|
||||||
|
|
||||||
self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX)
|
self.stddevX = stddevX = math.copysign(abs(varianceX) ** 0.5, varianceX)
|
||||||
self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY)
|
self.stddevY = stddevY = math.copysign(abs(varianceY) ** 0.5, varianceY)
|
||||||
|
|
||||||
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
|
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
|
||||||
self.covariance = covariance = self.momentXY / area - meanX * meanY
|
self.covariance = covariance = self.momentXY / area - meanX * meanY
|
||||||
@ -75,28 +75,48 @@ def _test(glyphset, upem, glyphs):
|
|||||||
from fontTools.pens.transformPen import TransformPen
|
from fontTools.pens.transformPen import TransformPen
|
||||||
from fontTools.misc.transform import Scale
|
from fontTools.misc.transform import Scale
|
||||||
|
|
||||||
print('upem', upem)
|
print("upem", upem)
|
||||||
|
|
||||||
for glyph_name in glyphs:
|
for glyph_name in glyphs:
|
||||||
print()
|
print()
|
||||||
print("glyph:", glyph_name)
|
print("glyph:", glyph_name)
|
||||||
glyph = glyphset[glyph_name]
|
glyph = glyphset[glyph_name]
|
||||||
pen = StatisticsPen(glyphset=glyphset)
|
pen = StatisticsPen(glyphset=glyphset)
|
||||||
transformer = TransformPen(pen, Scale(1./upem))
|
transformer = TransformPen(pen, Scale(1.0 / upem))
|
||||||
glyph.draw(transformer)
|
glyph.draw(transformer)
|
||||||
for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']:
|
for item in [
|
||||||
|
"area",
|
||||||
|
"momentX",
|
||||||
|
"momentY",
|
||||||
|
"momentXX",
|
||||||
|
"momentYY",
|
||||||
|
"momentXY",
|
||||||
|
"meanX",
|
||||||
|
"meanY",
|
||||||
|
"varianceX",
|
||||||
|
"varianceY",
|
||||||
|
"stddevX",
|
||||||
|
"stddevY",
|
||||||
|
"covariance",
|
||||||
|
"correlation",
|
||||||
|
"slant",
|
||||||
|
]:
|
||||||
print("%s: %g" % (item, getattr(pen, item)))
|
print("%s: %g" % (item, getattr(pen, item)))
|
||||||
|
|
||||||
|
|
||||||
def main(args):
|
def main(args):
|
||||||
if not args:
|
if not args:
|
||||||
return
|
return
|
||||||
filename, glyphs = args[0], args[1:]
|
filename, glyphs = args[0], args[1:]
|
||||||
from fontTools.ttLib import TTFont
|
from fontTools.ttLib import TTFont
|
||||||
|
|
||||||
font = TTFont(filename)
|
font = TTFont(filename)
|
||||||
if not glyphs:
|
if not glyphs:
|
||||||
glyphs = font.getGlyphOrder()
|
glyphs = font.getGlyphOrder()
|
||||||
_test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs)
|
_test(font.getGlyphSet(), font["head"].unitsPerEm, glyphs)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
main(sys.argv[1:])
|
main(sys.argv[1:])
|
||||||
|
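StatisticsPen's _test() driver above shows the intended use; a condensed sketch of the same flow follows ("MyFont.ttf" and glyph "A" are hypothetical placeholders):

    from fontTools.ttLib import TTFont
    from fontTools.misc.transform import Scale
    from fontTools.pens.transformPen import TransformPen
    from fontTools.pens.statisticsPen import StatisticsPen

    font = TTFont("MyFont.ttf")                      # hypothetical font file
    glyphset = font.getGlyphSet()
    pen = StatisticsPen(glyphset=glyphset)
    scale = Scale(1.0 / font["head"].unitsPerEm)     # normalize to a 1-em square
    glyphset["A"].draw(TransformPen(pen, scale))     # hypothetical glyph name
    print(pen.meanX, pen.meanY, pen.stddevX, pen.slant)
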
@ -36,6 +36,7 @@ class SVGPathPen(BasePen):
|
|||||||
glyphset[glyphname].draw(pen)
|
glyphset[glyphname].draw(pen)
|
||||||
print(tpen.getCommands())
|
print(tpen.getCommands())
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
|
def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
|
||||||
BasePen.__init__(self, glyphSet)
|
BasePen.__init__(self, glyphSet)
|
||||||
self._commands = []
|
self._commands = []
|
||||||
@ -209,22 +210,25 @@ def main(args=None):
|
|||||||
|
|
||||||
if args is None:
|
if args is None:
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
args = sys.argv[1:]
|
args = sys.argv[1:]
|
||||||
|
|
||||||
from fontTools.ttLib import TTFont
|
from fontTools.ttLib import TTFont
|
||||||
import argparse
|
import argparse
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
"fonttools pens.svgPathPen", description="Generate SVG from text")
|
"fonttools pens.svgPathPen", description="Generate SVG from text"
|
||||||
|
)
|
||||||
|
parser.add_argument("font", metavar="font.ttf", help="Font file.")
|
||||||
|
parser.add_argument("text", metavar="text", help="Text string.")
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"font", metavar="font.ttf", help="Font file.")
|
"--variations",
|
||||||
parser.add_argument(
|
metavar="AXIS=LOC",
|
||||||
"text", metavar="text", help="Text string.")
|
default="",
|
||||||
parser.add_argument(
|
|
||||||
"--variations", metavar="AXIS=LOC", default='',
|
|
||||||
help="List of space separated locations. A location consist in "
|
help="List of space separated locations. A location consist in "
|
||||||
"the name of a variation axis, followed by '=' and a number. E.g.: "
|
"the name of a variation axis, followed by '=' and a number. E.g.: "
|
||||||
"wght=700 wdth=80. The default is the location of the base master.")
|
"wght=700 wdth=80. The default is the location of the base master.",
|
||||||
|
)
|
||||||
|
|
||||||
options = parser.parse_args(args)
|
options = parser.parse_args(args)
|
||||||
|
|
||||||
@ -233,18 +237,18 @@ def main(args=None):
|
|||||||
|
|
||||||
location = {}
|
location = {}
|
||||||
for tag_v in options.variations.split():
|
for tag_v in options.variations.split():
|
||||||
fields = tag_v.split('=')
|
fields = tag_v.split("=")
|
||||||
tag = fields[0].strip()
|
tag = fields[0].strip()
|
||||||
v = int(fields[1])
|
v = int(fields[1])
|
||||||
location[tag] = v
|
location[tag] = v
|
||||||
|
|
||||||
hhea = font['hhea']
|
hhea = font["hhea"]
|
||||||
ascent, descent = hhea.ascent, hhea.descent
|
ascent, descent = hhea.ascent, hhea.descent
|
||||||
|
|
||||||
glyphset = font.getGlyphSet(location=location)
|
glyphset = font.getGlyphSet(location=location)
|
||||||
cmap = font['cmap'].getBestCmap()
|
cmap = font["cmap"].getBestCmap()
|
||||||
|
|
||||||
s = ''
|
s = ""
|
||||||
width = 0
|
width = 0
|
||||||
for u in text:
|
for u in text:
|
||||||
g = cmap[ord(u)]
|
g = cmap[ord(u)]
|
||||||
@ -254,20 +258,29 @@ def main(args=None):
|
|||||||
glyph.draw(pen)
|
glyph.draw(pen)
|
||||||
commands = pen.getCommands()
|
commands = pen.getCommands()
|
||||||
|
|
||||||
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (width, ascent, commands)
|
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (
|
||||||
|
width,
|
||||||
|
ascent,
|
||||||
|
commands,
|
||||||
|
)
|
||||||
|
|
||||||
width += glyph.width
|
width += glyph.width
|
||||||
|
|
||||||
print('<?xml version="1.0" encoding="UTF-8"?>')
|
print('<?xml version="1.0" encoding="UTF-8"?>')
|
||||||
print('<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % (width, ascent-descent))
|
print(
|
||||||
print(s, end='')
|
'<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">'
|
||||||
print('</svg>')
|
% (width, ascent - descent)
|
||||||
|
)
|
||||||
|
print(s, end="")
|
||||||
|
print("</svg>")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
if len(sys.argv) == 1:
|
if len(sys.argv) == 1:
|
||||||
import doctest
|
import doctest
|
||||||
|
|
||||||
sys.exit(doctest.testmod().failed)
|
sys.exit(doctest.testmod().failed)
|
||||||
|
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
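The svgPathPen changes are argument re-wrapping and quote normalization in main(). Programmatic use of the pen itself, as opposed to the CLI, looks like this sketch (standard fontTools.pens.svgPathPen module path assumed):

    from fontTools.pens.svgPathPen import SVGPathPen

    pen = SVGPathPen(None)              # a glyphSet is only needed for components
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.qCurveTo((150, 50), (100, 100))
    pen.closePath()
    print(pen.getCommands())            # an SVG path "d" string for the contour
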
@ -32,14 +32,14 @@ class T2CharStringPen(BasePen):
|
|||||||
return [pt[0] - p0[0], pt[1] - p0[1]]
|
return [pt[0] - p0[0], pt[1] - p0[1]]
|
||||||
|
|
||||||
def _moveTo(self, pt):
|
def _moveTo(self, pt):
|
||||||
self._commands.append(('rmoveto', self._p(pt)))
|
self._commands.append(("rmoveto", self._p(pt)))
|
||||||
|
|
||||||
def _lineTo(self, pt):
|
def _lineTo(self, pt):
|
||||||
self._commands.append(('rlineto', self._p(pt)))
|
self._commands.append(("rlineto", self._p(pt)))
|
||||||
|
|
||||||
def _curveToOne(self, pt1, pt2, pt3):
|
def _curveToOne(self, pt1, pt2, pt3):
|
||||||
_p = self._p
|
_p = self._p
|
||||||
self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3)))
|
self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3)))
|
||||||
|
|
||||||
def _closePath(self):
|
def _closePath(self):
|
||||||
pass
|
pass
|
||||||
@ -51,15 +51,18 @@ class T2CharStringPen(BasePen):
|
|||||||
commands = self._commands
|
commands = self._commands
|
||||||
if optimize:
|
if optimize:
|
||||||
maxstack = 48 if not self._CFF2 else 513
|
maxstack = 48 if not self._CFF2 else 513
|
||||||
commands = specializeCommands(commands,
|
commands = specializeCommands(
|
||||||
generalizeFirst=False,
|
commands, generalizeFirst=False, maxstack=maxstack
|
||||||
maxstack=maxstack)
|
)
|
||||||
program = commandsToProgram(commands)
|
program = commandsToProgram(commands)
|
||||||
if self._width is not None:
|
if self._width is not None:
|
||||||
assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString."
|
assert (
|
||||||
|
not self._CFF2
|
||||||
|
), "CFF2 does not allow encoding glyph width in CharString."
|
||||||
program.insert(0, otRound(self._width))
|
program.insert(0, otRound(self._width))
|
||||||
if not self._CFF2:
|
if not self._CFF2:
|
||||||
program.append('endchar')
|
program.append("endchar")
|
||||||
charString = T2CharString(
|
charString = T2CharString(
|
||||||
program=program, private=private, globalSubrs=globalSubrs)
|
program=program, private=private, globalSubrs=globalSubrs
|
||||||
|
)
|
||||||
return charString
|
return charString
|
||||||
|
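T2CharStringPen's getCharString() is unchanged apart from wrapping. A sketch of building a CharString with it, assuming the usual constructor (width, glyphSet, CFF2=False), which this diff does not show:

    from fontTools.pens.t2CharStringPen import T2CharStringPen

    pen = T2CharStringPen(width=250, glyphSet=None)
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.lineTo((100, 100))
    pen.closePath()
    charstring = pen.getCharString()     # optimized T2 program; non-CFF2 charstrings get 'endchar'
    print(charstring.program)            # starts with the width (250), ends with 'endchar'
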
@ -14,24 +14,31 @@ class TeePen(AbstractPen):
|
|||||||
if len(pens) == 1:
|
if len(pens) == 1:
|
||||||
pens = pens[0]
|
pens = pens[0]
|
||||||
self.pens = pens
|
self.pens = pens
|
||||||
|
|
||||||
def moveTo(self, p0):
|
def moveTo(self, p0):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.moveTo(p0)
|
pen.moveTo(p0)
|
||||||
|
|
||||||
def lineTo(self, p1):
|
def lineTo(self, p1):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.lineTo(p1)
|
pen.lineTo(p1)
|
||||||
|
|
||||||
def qCurveTo(self, *points):
|
def qCurveTo(self, *points):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.qCurveTo(*points)
|
pen.qCurveTo(*points)
|
||||||
|
|
||||||
def curveTo(self, *points):
|
def curveTo(self, *points):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.curveTo(*points)
|
pen.curveTo(*points)
|
||||||
|
|
||||||
def closePath(self):
|
def closePath(self):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.closePath()
|
pen.closePath()
|
||||||
|
|
||||||
def endPath(self):
|
def endPath(self):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.endPath()
|
pen.endPath()
|
||||||
|
|
||||||
def addComponent(self, glyphName, transformation):
|
def addComponent(self, glyphName, transformation):
|
||||||
for pen in self.pens:
|
for pen in self.pens:
|
||||||
pen.addComponent(glyphName, transformation)
|
pen.addComponent(glyphName, transformation)
|
||||||
@ -39,6 +46,7 @@ class TeePen(AbstractPen):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
from fontTools.pens.basePen import _TestPen
|
from fontTools.pens.basePen import _TestPen
|
||||||
|
|
||||||
pen = TeePen(_TestPen(), _TestPen())
|
pen = TeePen(_TestPen(), _TestPen())
|
||||||
pen.moveTo((0, 0))
|
pen.moveTo((0, 0))
|
||||||
pen.lineTo((0, 100))
|
pen.lineTo((0, 100))
|
||||||
|
@ -18,6 +18,7 @@ class TransformPen(FilterPen):
|
|||||||
super(TransformPen, self).__init__(outPen)
|
super(TransformPen, self).__init__(outPen)
|
||||||
if not hasattr(transformation, "transformPoint"):
|
if not hasattr(transformation, "transformPoint"):
|
||||||
from fontTools.misc.transform import Transform
|
from fontTools.misc.transform import Transform
|
||||||
|
|
||||||
transformation = Transform(*transformation)
|
transformation = Transform(*transformation)
|
||||||
self._transformation = transformation
|
self._transformation = transformation
|
||||||
self._transformPoint = transformation.transformPoint
|
self._transformPoint = transformation.transformPoint
|
||||||
@ -85,6 +86,7 @@ class TransformPointPen(FilterPointPen):
|
|||||||
super().__init__(outPointPen)
|
super().__init__(outPointPen)
|
||||||
if not hasattr(transformation, "transformPoint"):
|
if not hasattr(transformation, "transformPoint"):
|
||||||
from fontTools.misc.transform import Transform
|
from fontTools.misc.transform import Transform
|
||||||
|
|
||||||
transformation = Transform(*transformation)
|
transformation = Transform(*transformation)
|
||||||
self._transformation = transformation
|
self._transformation = transformation
|
||||||
self._transformPoint = transformation.transformPoint
|
self._transformPoint = transformation.transformPoint
|
||||||
@ -101,6 +103,7 @@ class TransformPointPen(FilterPointPen):
|
|||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
from fontTools.pens.basePen import _TestPen
|
from fontTools.pens.basePen import _TestPen
|
||||||
|
|
||||||
pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
|
pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
|
||||||
pen.moveTo((0, 0))
|
pen.moveTo((0, 0))
|
||||||
pen.lineTo((0, 100))
|
pen.lineTo((0, 100))
|
||||||
|
@ -5,11 +5,11 @@ __all__ = ["WxPen"]
|
|||||||
|
|
||||||
|
|
||||||
class WxPen(BasePen):
|
class WxPen(BasePen):
|
||||||
|
|
||||||
def __init__(self, glyphSet, path=None):
|
def __init__(self, glyphSet, path=None):
|
||||||
BasePen.__init__(self, glyphSet)
|
BasePen.__init__(self, glyphSet)
|
||||||
if path is None:
|
if path is None:
|
||||||
import wx
|
import wx
|
||||||
|
|
||||||
path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
|
path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
|
||||||
self.path = path
|
self.path = path
|
||||||
|
|
||||||
|
File diff suppressed because it is too large
@ -2,5 +2,5 @@ import sys
|
|||||||
from fontTools.subset import main
|
from fontTools.subset import main
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
@ -7,17 +7,15 @@ from fontTools.subset.util import _add_method, _uniq_sort
|
|||||||
|
|
||||||
|
|
||||||
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
||||||
|
|
||||||
def __init__(self, components, localSubrs, globalSubrs):
|
def __init__(self, components, localSubrs, globalSubrs):
|
||||||
psCharStrings.SimpleT2Decompiler.__init__(self,
|
psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
|
||||||
localSubrs,
|
|
||||||
globalSubrs)
|
|
||||||
self.components = components
|
self.components = components
|
||||||
|
|
||||||
def op_endchar(self, index):
|
def op_endchar(self, index):
|
||||||
args = self.popall()
|
args = self.popall()
|
||||||
if len(args) >= 4:
|
if len(args) >= 4:
|
||||||
from fontTools.encodings.StandardEncoding import StandardEncoding
|
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||||
|
|
||||||
# endchar can do seac accent bulding; The T2 spec says it's deprecated,
|
# endchar can do seac accent bulding; The T2 spec says it's deprecated,
|
||||||
# but recent software that shall remain nameless does output it.
|
# but recent software that shall remain nameless does output it.
|
||||||
adx, ady, bchar, achar = args[-4:]
|
adx, ady, bchar, achar = args[-4:]
|
||||||
@ -26,7 +24,8 @@ class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
|||||||
self.components.add(baseGlyph)
|
self.components.add(baseGlyph)
|
||||||
self.components.add(accentGlyph)
|
self.components.add(accentGlyph)
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
|
||||||
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def closure_glyphs(self, s):
|
def closure_glyphs(self, s):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
assert len(cff) == 1
|
assert len(cff) == 1
|
||||||
@ -48,13 +47,14 @@ def closure_glyphs(self, s):
|
|||||||
s.glyphs.update(components)
|
s.glyphs.update(components)
|
||||||
decompose = components
|
decompose = components
|
||||||
|
|
||||||
|
|
||||||
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
|
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
|
||||||
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
|
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
|
||||||
if isCFF2 or ignoreWidth:
|
if isCFF2 or ignoreWidth:
|
||||||
# CFF2 charstrings have no widths nor 'endchar' operators
|
# CFF2 charstrings have no widths nor 'endchar' operators
|
||||||
c.setProgram([] if isCFF2 else ['endchar'])
|
c.setProgram([] if isCFF2 else ["endchar"])
|
||||||
else:
|
else:
|
||||||
if hasattr(font, 'FDArray') and font.FDArray is not None:
|
if hasattr(font, "FDArray") and font.FDArray is not None:
|
||||||
private = font.FDArray[fdSelectIndex].Private
|
private = font.FDArray[fdSelectIndex].Private
|
||||||
else:
|
else:
|
||||||
private = font.Private
|
private = font.Private
|
||||||
@ -63,11 +63,12 @@ def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
|
|||||||
pen = NullPen()
|
pen = NullPen()
|
||||||
c.draw(pen) # this will set the charstring's width
|
c.draw(pen) # this will set the charstring's width
|
||||||
if c.width != dfltWdX:
|
if c.width != dfltWdX:
|
||||||
c.program = [c.width - nmnlWdX, 'endchar']
|
c.program = [c.width - nmnlWdX, "endchar"]
|
||||||
else:
|
else:
|
||||||
c.program = ['endchar']
|
c.program = ["endchar"]
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
|
||||||
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def prune_pre_subset(self, font, options):
|
def prune_pre_subset(self, font, options):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
# CFF table must have one font only
|
# CFF table must have one font only
|
||||||
@ -87,7 +88,8 @@ def prune_pre_subset(self, font, options):
|
|||||||
|
|
||||||
return True # bool(cff.fontNames)
|
return True # bool(cff.fontNames)
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
|
||||||
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def subset_glyphs(self, s):
|
def subset_glyphs(self, s):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
for fontname in cff.keys():
|
for fontname in cff.keys():
|
||||||
@ -98,7 +100,8 @@ def subset_glyphs(self, s):
|
|||||||
|
|
||||||
# Load all glyphs
|
# Load all glyphs
|
||||||
for g in font.charset:
|
for g in font.charset:
|
||||||
if g not in glyphs: continue
|
if g not in glyphs:
|
||||||
|
continue
|
||||||
c, _ = cs.getItemAndSelector(g)
|
c, _ = cs.getItemAndSelector(g)
|
||||||
|
|
||||||
if cs.charStringsAreIndexed:
|
if cs.charStringsAreIndexed:
|
||||||
@ -117,31 +120,31 @@ def subset_glyphs(self, s):
|
|||||||
newCharStrings[g] = indicesIdx
|
newCharStrings[g] = indicesIdx
|
||||||
cs.charStrings = newCharStrings
|
cs.charStrings = newCharStrings
|
||||||
else:
|
else:
|
||||||
cs.charStrings = {g:v
|
cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
|
||||||
for g,v in cs.charStrings.items()
|
|
||||||
if g in glyphs}
|
|
||||||
font.charset = [g for g in font.charset if g in glyphs]
|
font.charset = [g for g in font.charset if g in glyphs]
|
||||||
font.numGlyphs = len(font.charset)
|
font.numGlyphs = len(font.charset)
|
||||||
|
|
||||||
|
|
||||||
if s.options.retain_gids:
|
if s.options.retain_gids:
|
||||||
isCFF2 = cff.major > 1
|
isCFF2 = cff.major > 1
|
||||||
for g in s.glyphs_emptied:
|
for g in s.glyphs_emptied:
|
||||||
_empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
|
_empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
|
||||||
|
|
||||||
|
|
||||||
return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
|
return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
|
||||||
|
|
||||||
|
|
||||||
@_add_method(psCharStrings.T2CharString)
|
@_add_method(psCharStrings.T2CharString)
|
||||||
def subset_subroutines(self, subrs, gsubrs):
|
def subset_subroutines(self, subrs, gsubrs):
|
||||||
p = self.program
|
p = self.program
|
||||||
for i in range(1, len(p)):
|
for i in range(1, len(p)):
|
||||||
if p[i] == 'callsubr':
|
if p[i] == "callsubr":
|
||||||
assert isinstance(p[i - 1], int)
|
assert isinstance(p[i - 1], int)
|
||||||
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
|
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
|
||||||
elif p[i] == 'callgsubr':
|
elif p[i] == "callgsubr":
|
||||||
assert isinstance(p[i - 1], int)
|
assert isinstance(p[i - 1], int)
|
||||||
p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
|
p[i - 1] = (
|
||||||
|
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@_add_method(psCharStrings.T2CharString)
|
@_add_method(psCharStrings.T2CharString)
|
||||||
def drop_hints(self):
|
def drop_hints(self):
|
||||||
@ -157,19 +160,21 @@ def drop_hints(self):
|
|||||||
self.program = self.program[hints.last_hint :]
|
self.program = self.program[hints.last_hint :]
|
||||||
if not self.program:
|
if not self.program:
|
||||||
# TODO CFF2 no need for endchar.
|
# TODO CFF2 no need for endchar.
|
||||||
self.program.append('endchar')
|
self.program.append("endchar")
|
||||||
if hasattr(self, 'width'):
|
if hasattr(self, "width"):
|
||||||
# Insert width back if needed
|
# Insert width back if needed
|
||||||
if self.width != self.private.defaultWidthX:
|
if self.width != self.private.defaultWidthX:
|
||||||
# For CFF2 charstrings, this should never happen
|
# For CFF2 charstrings, this should never happen
|
||||||
assert self.private.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
|
assert (
|
||||||
|
self.private.defaultWidthX is not None
|
||||||
|
), "CFF2 CharStrings must not have an initial width value"
|
||||||
self.program.insert(0, self.width - self.private.nominalWidthX)
|
self.program.insert(0, self.width - self.private.nominalWidthX)
|
||||||
|
|
||||||
if hints.has_hintmask:
|
if hints.has_hintmask:
|
||||||
i = 0
|
i = 0
|
||||||
p = self.program
|
p = self.program
|
||||||
while i < len(p):
|
while i < len(p):
|
||||||
if p[i] in ['hintmask', 'cntrmask']:
|
if p[i] in ["hintmask", "cntrmask"]:
|
||||||
assert i + 1 <= len(p)
|
assert i + 1 <= len(p)
|
||||||
del p[i : i + 2]
|
del p[i : i + 2]
|
||||||
continue
|
continue
|
||||||
@ -179,13 +184,12 @@ def drop_hints(self):
|
|||||||
|
|
||||||
del self._hints
|
del self._hints
|
||||||
|
|
||||||
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
|
||||||
|
|
||||||
|
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
||||||
def __init__(self, localSubrs, globalSubrs, private):
|
def __init__(self, localSubrs, globalSubrs, private):
|
||||||
psCharStrings.SimpleT2Decompiler.__init__(self,
|
psCharStrings.SimpleT2Decompiler.__init__(
|
||||||
localSubrs,
|
self, localSubrs, globalSubrs, private
|
||||||
globalSubrs,
|
)
|
||||||
private)
|
|
||||||
for subrs in [localSubrs, globalSubrs]:
|
for subrs in [localSubrs, globalSubrs]:
|
||||||
if subrs and not hasattr(subrs, "_used"):
|
if subrs and not hasattr(subrs, "_used"):
|
||||||
subrs._used = set()
|
subrs._used = set()
|
||||||
@ -198,8 +202,8 @@ class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
|
|||||||
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
|
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
|
||||||
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
|
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
|
||||||
|
|
||||||
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
|
|
||||||
|
|
||||||
|
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
|
||||||
class Hints(object):
|
class Hints(object):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# Whether calling this charstring produces any hint stems
|
# Whether calling this charstring produces any hint stems
|
||||||
@ -223,16 +227,20 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
|
|||||||
self.has_hintmask = False
|
self.has_hintmask = False
|
||||||
# List of indices of calls to empty subroutines to remove.
|
# List of indices of calls to empty subroutines to remove.
|
||||||
self.deletions = []
|
self.deletions = []
|
||||||
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
|
def __init__(
|
||||||
|
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
|
||||||
|
):
|
||||||
self._css = css
|
self._css = css
|
||||||
psCharStrings.T2WidthExtractor.__init__(
|
psCharStrings.T2WidthExtractor.__init__(
|
||||||
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)
|
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
|
||||||
|
)
|
||||||
self.private = private
|
self.private = private
|
||||||
|
|
||||||
def execute(self, charString):
|
def execute(self, charString):
|
||||||
old_hints = charString._hints if hasattr(charString, '_hints') else None
|
old_hints = charString._hints if hasattr(charString, "_hints") else None
|
||||||
charString._hints = self.Hints()
|
charString._hints = self.Hints()
|
||||||
|
|
||||||
psCharStrings.T2WidthExtractor.execute(self, charString)
|
psCharStrings.T2WidthExtractor.execute(self, charString)
|
||||||
@ -268,19 +276,24 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
|
|||||||
def op_hstem(self, index):
|
def op_hstem(self, index):
|
||||||
psCharStrings.T2WidthExtractor.op_hstem(self, index)
|
psCharStrings.T2WidthExtractor.op_hstem(self, index)
|
||||||
self.processHint(index)
|
self.processHint(index)
|
||||||
|
|
||||||
def op_vstem(self, index):
|
def op_vstem(self, index):
|
||||||
psCharStrings.T2WidthExtractor.op_vstem(self, index)
|
psCharStrings.T2WidthExtractor.op_vstem(self, index)
|
||||||
self.processHint(index)
|
self.processHint(index)
|
||||||
|
|
||||||
def op_hstemhm(self, index):
|
def op_hstemhm(self, index):
|
||||||
psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
|
psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
|
||||||
self.processHint(index)
|
self.processHint(index)
|
||||||
|
|
||||||
def op_vstemhm(self, index):
|
def op_vstemhm(self, index):
|
||||||
psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
|
psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
|
||||||
self.processHint(index)
|
self.processHint(index)
|
||||||
|
|
||||||
def op_hintmask(self, index):
|
def op_hintmask(self, index):
|
||||||
rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
|
rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
|
||||||
self.processHintmask(index)
|
self.processHintmask(index)
|
||||||
return rv
|
return rv
|
||||||
|
|
||||||
def op_cntrmask(self, index):
|
def op_cntrmask(self, index):
|
||||||
rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
|
rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
|
||||||
self.processHintmask(index)
|
self.processHintmask(index)
|
||||||
@ -340,7 +353,7 @@ class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
|
|||||||
hints.status = max(hints.status, subr_hints.status)
|
hints.status = max(hints.status, subr_hints.status)
|
||||||
|
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def prune_post_subset(self, ttfFont, options):
|
def prune_post_subset(self, ttfFont, options):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
for fontname in cff.keys():
|
for fontname in cff.keys():
|
||||||
@ -369,19 +382,21 @@ def prune_post_subset(self, ttfFont, options):
|
|||||||
|
|
||||||
|
|
||||||
def _delete_empty_subrs(private_dict):
|
def _delete_empty_subrs(private_dict):
|
||||||
if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
|
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
|
||||||
if 'Subrs' in private_dict.rawDict:
|
if "Subrs" in private_dict.rawDict:
|
||||||
del private_dict.rawDict['Subrs']
|
del private_dict.rawDict["Subrs"]
|
||||||
del private_dict.Subrs
|
del private_dict.Subrs
|
||||||
|
|
||||||
|
|
||||||
@deprecateFunction("use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning)
|
@deprecateFunction(
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
"use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
|
||||||
|
)
|
||||||
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def desubroutinize(self):
|
def desubroutinize(self):
|
||||||
self.cff.desubroutinize()
|
self.cff.desubroutinize()
|
||||||
|
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def remove_hints(self):
|
def remove_hints(self):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
for fontname in cff.keys():
|
for fontname in cff.keys():
|
||||||
@ -407,10 +422,14 @@ def remove_hints(self):
|
|||||||
c, _ = cs.getItemAndSelector(g)
|
c, _ = cs.getItemAndSelector(g)
|
||||||
c.decompile()
|
c.decompile()
|
||||||
subrs = getattr(c.private, "Subrs", [])
|
subrs = getattr(c.private, "Subrs", [])
|
||||||
decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
|
decompiler = _DehintingT2Decompiler(
|
||||||
|
css,
|
||||||
|
subrs,
|
||||||
|
c.globalSubrs,
|
||||||
c.private.nominalWidthX,
|
c.private.nominalWidthX,
|
||||||
c.private.defaultWidthX,
|
c.private.defaultWidthX,
|
||||||
c.private)
|
c.private,
|
||||||
|
)
|
||||||
decompiler.execute(c)
|
decompiler.execute(c)
|
||||||
c.width = decompiler.width
|
c.width = decompiler.width
|
||||||
for charstring in css:
|
for charstring in css:
|
||||||
@ -419,22 +438,33 @@ def remove_hints(self):
|
|||||||
|
|
||||||
# Drop font-wide hinting values
|
# Drop font-wide hinting values
|
||||||
all_privs = []
|
all_privs = []
|
||||||
if hasattr(font, 'FDArray'):
|
if hasattr(font, "FDArray"):
|
||||||
all_privs.extend(fd.Private for fd in font.FDArray)
|
all_privs.extend(fd.Private for fd in font.FDArray)
|
||||||
else:
|
else:
|
||||||
all_privs.append(font.Private)
|
all_privs.append(font.Private)
|
||||||
for priv in all_privs:
|
for priv in all_privs:
|
||||||
for k in ['BlueValues', 'OtherBlues',
|
for k in [
|
||||||
'FamilyBlues', 'FamilyOtherBlues',
|
"BlueValues",
|
||||||
'BlueScale', 'BlueShift', 'BlueFuzz',
|
"OtherBlues",
|
||||||
'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
|
"FamilyBlues",
|
||||||
'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
|
"FamilyOtherBlues",
|
||||||
|
"BlueScale",
|
||||||
|
"BlueShift",
|
||||||
|
"BlueFuzz",
|
||||||
|
"StemSnapH",
|
||||||
|
"StemSnapV",
|
||||||
|
"StdHW",
|
||||||
|
"StdVW",
|
||||||
|
"ForceBold",
|
||||||
|
"LanguageGroup",
|
||||||
|
"ExpansionFactor",
|
||||||
|
]:
|
||||||
if hasattr(priv, k):
|
if hasattr(priv, k):
|
||||||
setattr(priv, k, None)
|
setattr(priv, k, None)
|
||||||
self.remove_unused_subroutines()
|
self.remove_unused_subroutines()
|
||||||
|
|
||||||
|
|
||||||
@_add_method(ttLib.getTableClass('CFF '))
|
@_add_method(ttLib.getTableClass("CFF "))
|
||||||
def remove_unused_subroutines(self):
|
def remove_unused_subroutines(self):
|
||||||
cff = self.cff
|
cff = self.cff
|
||||||
for fontname in cff.keys():
|
for fontname in cff.keys():
|
||||||
@ -450,16 +480,20 @@ def remove_unused_subroutines(self):
|
|||||||
decompiler.execute(c)
|
decompiler.execute(c)
|
||||||
|
|
||||||
all_subrs = [font.GlobalSubrs]
|
all_subrs = [font.GlobalSubrs]
|
||||||
if hasattr(font, 'FDArray'):
|
if hasattr(font, "FDArray"):
|
||||||
all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
|
all_subrs.extend(
|
||||||
elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
|
fd.Private.Subrs
|
||||||
|
for fd in font.FDArray
|
||||||
|
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
|
||||||
|
)
|
||||||
|
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
|
||||||
all_subrs.append(font.Private.Subrs)
|
all_subrs.append(font.Private.Subrs)
|
||||||
|
|
||||||
subrs = set(subrs) # Remove duplicates
|
subrs = set(subrs) # Remove duplicates
|
||||||
|
|
||||||
# Prepare
|
# Prepare
|
||||||
for subrs in all_subrs:
|
for subrs in all_subrs:
|
||||||
if not hasattr(subrs, '_used'):
|
if not hasattr(subrs, "_used"):
|
||||||
subrs._used = set()
|
subrs._used = set()
|
||||||
subrs._used = _uniq_sort(subrs._used)
|
subrs._used = _uniq_sort(subrs._used)
|
||||||
subrs._old_bias = psCharStrings.calcSubrBias(subrs)
|
subrs._old_bias = psCharStrings.calcSubrBias(subrs)
|
||||||
@ -474,7 +508,7 @@ def remove_unused_subroutines(self):
|
|||||||
# Renumber subroutines themselves
|
# Renumber subroutines themselves
|
||||||
for subrs in all_subrs:
|
for subrs in all_subrs:
|
||||||
if subrs == font.GlobalSubrs:
|
if subrs == font.GlobalSubrs:
|
||||||
if not hasattr(font, 'FDArray') and hasattr(font.Private, 'Subrs'):
|
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
|
||||||
local_subrs = font.Private.Subrs
|
local_subrs = font.Private.Subrs
|
||||||
else:
|
else:
|
||||||
local_subrs = []
|
local_subrs = []
|
||||||
@ -482,16 +516,16 @@ def remove_unused_subroutines(self):
|
|||||||
local_subrs = subrs
|
local_subrs = subrs
|
||||||
|
|
||||||
subrs.items = [subrs.items[i] for i in subrs._used]
|
subrs.items = [subrs.items[i] for i in subrs._used]
|
||||||
if hasattr(subrs, 'file'):
|
if hasattr(subrs, "file"):
|
||||||
del subrs.file
|
del subrs.file
|
||||||
if hasattr(subrs, 'offsets'):
|
if hasattr(subrs, "offsets"):
|
||||||
del subrs.offsets
|
del subrs.offsets
|
||||||
|
|
||||||
for subr in subrs.items:
|
for subr in subrs.items:
|
||||||
subr.subset_subroutines(local_subrs, font.GlobalSubrs)
|
subr.subset_subroutines(local_subrs, font.GlobalSubrs)
|
||||||
|
|
||||||
# Delete local SubrsIndex if empty
|
# Delete local SubrsIndex if empty
|
||||||
if hasattr(font, 'FDArray'):
|
if hasattr(font, "FDArray"):
|
||||||
for fd in font.FDArray:
|
for fd in font.FDArray:
|
||||||
_delete_empty_subrs(fd.Private)
|
_delete_empty_subrs(fd.Private)
|
||||||
else:
|
else:
|
||||||
|
@ -19,7 +19,6 @@ def _map_point(matrix, pt):
|
|||||||
|
|
||||||
|
|
||||||
class EllipticalArc(object):
|
class EllipticalArc(object):
|
||||||
|
|
||||||
def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
|
def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
|
||||||
self.current_point = current_point
|
self.current_point = current_point
|
||||||
self.rx = rx
|
self.rx = rx
|
||||||
|
@ -11,9 +11,9 @@ from .arc import EllipticalArc
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
|
COMMANDS = set("MmZzLlHhVvCcSsQqTtAa")
|
||||||
ARC_COMMANDS = set("Aa")
|
ARC_COMMANDS = set("Aa")
|
||||||
UPPERCASE = set('MZLHVCSQTA')
|
UPPERCASE = set("MZLHVCSQTA")
|
||||||
|
|
||||||
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
|
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
|
||||||
|
|
||||||
@ -136,11 +136,13 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
# If this element starts with numbers, it is an implicit command
|
# If this element starts with numbers, it is an implicit command
|
||||||
# and we don't change the command. Check that it's allowed:
|
# and we don't change the command. Check that it's allowed:
|
||||||
if command is None:
|
if command is None:
|
||||||
raise ValueError("Unallowed implicit command in %s, position %s" % (
|
raise ValueError(
|
||||||
pathdef, len(pathdef.split()) - len(elements)))
|
"Unallowed implicit command in %s, position %s"
|
||||||
|
% (pathdef, len(pathdef.split()) - len(elements))
|
||||||
|
)
|
||||||
last_command = command # Used by S and T
|
last_command = command # Used by S and T
|
||||||
|
|
||||||
if command == 'M':
|
if command == "M":
|
||||||
# Moveto command.
|
# Moveto command.
|
||||||
x = elements.pop()
|
x = elements.pop()
|
||||||
y = elements.pop()
|
y = elements.pop()
|
||||||
@ -164,9 +166,9 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
# Implicit moveto commands are treated as lineto commands.
|
# Implicit moveto commands are treated as lineto commands.
|
||||||
# So we set command to lineto here, in case there are
|
# So we set command to lineto here, in case there are
|
||||||
# further implicit commands after this moveto.
|
# further implicit commands after this moveto.
|
||||||
command = 'L'
|
command = "L"
|
||||||
|
|
||||||
elif command == 'Z':
|
elif command == "Z":
|
||||||
# Close path
|
# Close path
|
||||||
if current_pos != start_pos:
|
if current_pos != start_pos:
|
||||||
pen.lineTo((start_pos.real, start_pos.imag))
|
pen.lineTo((start_pos.real, start_pos.imag))
|
||||||
@ -175,7 +177,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
start_pos = None
|
start_pos = None
|
||||||
command = None # You can't have implicit commands after closing.
|
command = None # You can't have implicit commands after closing.
|
||||||
|
|
||||||
elif command == 'L':
|
elif command == "L":
|
||||||
x = elements.pop()
|
x = elements.pop()
|
||||||
y = elements.pop()
|
y = elements.pop()
|
||||||
pos = float(x) + float(y) * 1j
|
pos = float(x) + float(y) * 1j
|
||||||
@ -184,7 +186,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
pen.lineTo((pos.real, pos.imag))
|
pen.lineTo((pos.real, pos.imag))
|
||||||
current_pos = pos
|
current_pos = pos
|
||||||
|
|
||||||
elif command == 'H':
|
elif command == "H":
|
||||||
x = elements.pop()
|
x = elements.pop()
|
||||||
pos = float(x) + current_pos.imag * 1j
|
pos = float(x) + current_pos.imag * 1j
|
||||||
if not absolute:
|
if not absolute:
|
||||||
@ -192,7 +194,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
pen.lineTo((pos.real, pos.imag))
|
pen.lineTo((pos.real, pos.imag))
|
||||||
current_pos = pos
|
current_pos = pos
|
||||||
|
|
||||||
elif command == 'V':
|
elif command == "V":
|
||||||
y = elements.pop()
|
y = elements.pop()
|
||||||
pos = current_pos.real + float(y) * 1j
|
pos = current_pos.real + float(y) * 1j
|
||||||
if not absolute:
|
if not absolute:
|
||||||
@ -200,7 +202,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
pen.lineTo((pos.real, pos.imag))
|
pen.lineTo((pos.real, pos.imag))
|
||||||
current_pos = pos
|
current_pos = pos
|
||||||
|
|
||||||
elif command == 'C':
|
elif command == "C":
|
||||||
control1 = float(elements.pop()) + float(elements.pop()) * 1j
|
control1 = float(elements.pop()) + float(elements.pop()) * 1j
|
||||||
control2 = float(elements.pop()) + float(elements.pop()) * 1j
|
control2 = float(elements.pop()) + float(elements.pop()) * 1j
|
||||||
end = float(elements.pop()) + float(elements.pop()) * 1j
|
end = float(elements.pop()) + float(elements.pop()) * 1j
|
||||||
@ -210,17 +212,19 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
control2 += current_pos
|
control2 += current_pos
|
||||||
end += current_pos
|
end += current_pos
|
||||||
|
|
||||||
pen.curveTo((control1.real, control1.imag),
|
pen.curveTo(
|
||||||
|
(control1.real, control1.imag),
|
||||||
(control2.real, control2.imag),
|
(control2.real, control2.imag),
|
||||||
(end.real, end.imag))
|
(end.real, end.imag),
|
||||||
|
)
|
||||||
current_pos = end
|
current_pos = end
|
||||||
last_control = control2
|
last_control = control2
|
||||||
|
|
||||||
elif command == 'S':
|
elif command == "S":
|
||||||
# Smooth curve. First control point is the "reflection" of
|
# Smooth curve. First control point is the "reflection" of
|
||||||
# the second control point in the previous path.
|
# the second control point in the previous path.
|
||||||
|
|
||||||
if last_command not in 'CS':
|
if last_command not in "CS":
|
||||||
# If there is no previous command or if the previous command
|
# If there is no previous command or if the previous command
|
||||||
# was not an C, c, S or s, assume the first control point is
|
# was not an C, c, S or s, assume the first control point is
|
||||||
# coincident with the current point.
|
# coincident with the current point.
|
||||||
@ -238,13 +242,15 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
control2 += current_pos
|
control2 += current_pos
|
||||||
end += current_pos
|
end += current_pos
|
||||||
|
|
||||||
pen.curveTo((control1.real, control1.imag),
|
pen.curveTo(
|
||||||
|
(control1.real, control1.imag),
|
||||||
(control2.real, control2.imag),
|
(control2.real, control2.imag),
|
||||||
(end.real, end.imag))
|
(end.real, end.imag),
|
||||||
|
)
|
||||||
current_pos = end
|
current_pos = end
|
||||||
last_control = control2
|
last_control = control2
|
||||||
|
|
||||||
elif command == 'Q':
|
elif command == "Q":
|
||||||
control = float(elements.pop()) + float(elements.pop()) * 1j
|
control = float(elements.pop()) + float(elements.pop()) * 1j
|
||||||
end = float(elements.pop()) + float(elements.pop()) * 1j
|
end = float(elements.pop()) + float(elements.pop()) * 1j
|
||||||
|
|
||||||
@ -256,11 +262,11 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
current_pos = end
|
current_pos = end
|
||||||
last_control = control
|
last_control = control
|
||||||
|
|
||||||
elif command == 'T':
|
elif command == "T":
|
||||||
# Smooth curve. Control point is the "reflection" of
|
# Smooth curve. Control point is the "reflection" of
|
||||||
# the second control point in the previous path.
|
# the second control point in the previous path.
|
||||||
|
|
||||||
if last_command not in 'QT':
|
if last_command not in "QT":
|
||||||
# If there is no previous command or if the previous command
|
# If there is no previous command or if the previous command
|
||||||
# was not an Q, q, T or t, assume the first control point is
|
# was not an Q, q, T or t, assume the first control point is
|
||||||
# coincident with the current point.
|
# coincident with the current point.
|
||||||
@ -280,7 +286,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
|
|||||||
current_pos = end
|
current_pos = end
|
||||||
last_control = control
|
last_control = control
|
||||||
|
|
||||||
elif command == 'A':
|
elif command == "A":
|
||||||
rx = abs(float(elements.pop()))
|
rx = abs(float(elements.pop()))
|
||||||
ry = abs(float(elements.pop()))
|
ry = abs(float(elements.pop()))
|
||||||
rotation = float(elements.pop())
|
rotation = float(elements.pop())
|
||||||
|
@ -5,18 +5,18 @@ def _prefer_non_zero(*args):
|
|||||||
for arg in args:
|
for arg in args:
|
||||||
if arg != 0:
|
if arg != 0:
|
||||||
return arg
|
return arg
|
||||||
return 0.
|
return 0.0
|
||||||
|
|
||||||
|
|
||||||
def _ntos(n):
|
def _ntos(n):
|
||||||
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
|
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
|
||||||
return ('%.3f' % n).rstrip('0').rstrip('.')
|
return ("%.3f" % n).rstrip("0").rstrip(".")
|
||||||
|
|
||||||
|
|
||||||
def _strip_xml_ns(tag):
|
def _strip_xml_ns(tag):
|
||||||
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
|
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
|
||||||
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
|
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
|
||||||
return tag.split('}', 1)[1] if '}' in tag else tag
|
return tag.split("}", 1)[1] if "}" in tag else tag
|
||||||
|
|
||||||
|
|
||||||
def _transform(raw_value):
|
def _transform(raw_value):
|
||||||
@ -24,12 +24,12 @@ def _transform(raw_value):
|
|||||||
# No other transform functions are supported at the moment.
|
# No other transform functions are supported at the moment.
|
||||||
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
|
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
|
||||||
# start simple: if you aren't exactly matrix(...) then no love
|
# start simple: if you aren't exactly matrix(...) then no love
|
||||||
match = re.match(r'matrix\((.*)\)', raw_value)
|
match = re.match(r"matrix\((.*)\)", raw_value)
|
||||||
if not match:
|
if not match:
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
|
matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
|
||||||
if len(matrix) != 6:
|
if len(matrix) != 6:
|
||||||
raise ValueError('wrong # of terms in %s' % raw_value)
|
raise ValueError("wrong # of terms in %s" % raw_value)
|
||||||
return matrix
|
return matrix
|
||||||
|
|
||||||
|
|
||||||
@ -38,81 +38,83 @@ class PathBuilder(object):
|
|||||||
self.paths = []
|
self.paths = []
|
||||||
self.transforms = []
|
self.transforms = []
|
||||||
|
|
||||||
def _start_path(self, initial_path=''):
|
def _start_path(self, initial_path=""):
|
||||||
self.paths.append(initial_path)
|
self.paths.append(initial_path)
|
||||||
self.transforms.append(None)
|
self.transforms.append(None)
|
||||||
|
|
||||||
def _end_path(self):
|
def _end_path(self):
|
||||||
self._add('z')
|
self._add("z")
|
||||||
|
|
||||||
def _add(self, path_snippet):
|
def _add(self, path_snippet):
|
||||||
path = self.paths[-1]
|
path = self.paths[-1]
|
||||||
if path:
|
if path:
|
||||||
path += ' ' + path_snippet
|
path += " " + path_snippet
|
||||||
else:
|
else:
|
||||||
path = path_snippet
|
path = path_snippet
|
||||||
self.paths[-1] = path
|
self.paths[-1] = path
|
||||||
|
|
||||||
def _move(self, c, x, y):
|
def _move(self, c, x, y):
|
||||||
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
|
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
|
||||||
|
|
||||||
def M(self, x, y):
|
def M(self, x, y):
|
||||||
self._move('M', x, y)
|
self._move("M", x, y)
|
||||||
|
|
||||||
def m(self, x, y):
|
def m(self, x, y):
|
||||||
self._move('m', x, y)
|
self._move("m", x, y)
|
||||||
|
|
||||||
def _arc(self, c, rx, ry, x, y, large_arc):
|
def _arc(self, c, rx, ry, x, y, large_arc):
|
||||||
self._add('%s%s,%s 0 %d 1 %s,%s' % (c, _ntos(rx), _ntos(ry), large_arc,
|
self._add(
|
||||||
_ntos(x), _ntos(y)))
|
"%s%s,%s 0 %d 1 %s,%s"
|
||||||
|
% (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
|
||||||
|
)
|
||||||
|
|
||||||
def A(self, rx, ry, x, y, large_arc=0):
|
def A(self, rx, ry, x, y, large_arc=0):
|
||||||
self._arc('A', rx, ry, x, y, large_arc)
|
self._arc("A", rx, ry, x, y, large_arc)
|
||||||
|
|
||||||
def a(self, rx, ry, x, y, large_arc=0):
|
def a(self, rx, ry, x, y, large_arc=0):
|
||||||
self._arc('a', rx, ry, x, y, large_arc)
|
self._arc("a", rx, ry, x, y, large_arc)
|
||||||
|
|
||||||
def _vhline(self, c, x):
|
def _vhline(self, c, x):
|
||||||
self._add('%s%s' % (c, _ntos(x)))
|
self._add("%s%s" % (c, _ntos(x)))
|
||||||
|
|
||||||
def H(self, x):
|
def H(self, x):
|
||||||
self._vhline('H', x)
|
self._vhline("H", x)
|
||||||
|
|
||||||
def h(self, x):
|
def h(self, x):
|
||||||
self._vhline('h', x)
|
self._vhline("h", x)
|
||||||
|
|
||||||
def V(self, y):
|
def V(self, y):
|
||||||
self._vhline('V', y)
|
self._vhline("V", y)
|
||||||
|
|
||||||
def v(self, y):
|
def v(self, y):
|
||||||
self._vhline('v', y)
|
self._vhline("v", y)
|
||||||
|
|
||||||
def _line(self, c, x, y):
|
def _line(self, c, x, y):
|
||||||
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
|
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
|
||||||
|
|
||||||
def L(self, x, y):
|
def L(self, x, y):
|
||||||
self._line('L', x, y)
|
self._line("L", x, y)
|
||||||
|
|
||||||
def l(self, x, y):
|
def l(self, x, y):
|
||||||
self._line('l', x, y)
|
self._line("l", x, y)
|
||||||
|
|
||||||
def _parse_line(self, line):
|
def _parse_line(self, line):
|
||||||
x1 = float(line.attrib.get('x1', 0))
|
x1 = float(line.attrib.get("x1", 0))
|
||||||
y1 = float(line.attrib.get('y1', 0))
|
y1 = float(line.attrib.get("y1", 0))
|
||||||
x2 = float(line.attrib.get('x2', 0))
|
x2 = float(line.attrib.get("x2", 0))
|
||||||
y2 = float(line.attrib.get('y2', 0))
|
y2 = float(line.attrib.get("y2", 0))
|
||||||
|
|
||||||
self._start_path()
|
self._start_path()
|
||||||
self.M(x1, y1)
|
self.M(x1, y1)
|
||||||
self.L(x2, y2)
|
self.L(x2, y2)
|
||||||
|
|
||||||
def _parse_rect(self, rect):
|
def _parse_rect(self, rect):
|
||||||
x = float(rect.attrib.get('x', 0))
|
x = float(rect.attrib.get("x", 0))
|
||||||
y = float(rect.attrib.get('y', 0))
|
y = float(rect.attrib.get("y", 0))
|
||||||
w = float(rect.attrib.get('width'))
|
w = float(rect.attrib.get("width"))
|
||||||
h = float(rect.attrib.get('height'))
|
h = float(rect.attrib.get("height"))
|
||||||
rx = float(rect.attrib.get('rx', 0))
|
rx = float(rect.attrib.get("rx", 0))
|
||||||
ry = float(rect.attrib.get('ry', 0))
|
ry = float(rect.attrib.get("ry", 0))
|
||||||
|
|
||||||
rx = _prefer_non_zero(rx, ry)
|
rx = _prefer_non_zero(rx, ry)
|
||||||
ry = _prefer_non_zero(ry, rx)
|
ry = _prefer_non_zero(ry, rx)
|
||||||
@ -135,22 +137,22 @@ class PathBuilder(object):
|
|||||||
self._end_path()
|
self._end_path()
|
||||||
|
|
||||||
def _parse_path(self, path):
|
def _parse_path(self, path):
|
||||||
if 'd' in path.attrib:
|
if "d" in path.attrib:
|
||||||
self._start_path(initial_path=path.attrib['d'])
|
self._start_path(initial_path=path.attrib["d"])
|
||||||
|
|
||||||
def _parse_polygon(self, poly):
|
def _parse_polygon(self, poly):
|
||||||
if 'points' in poly.attrib:
|
if "points" in poly.attrib:
|
||||||
self._start_path('M' + poly.attrib['points'])
|
self._start_path("M" + poly.attrib["points"])
|
||||||
self._end_path()
|
self._end_path()
|
||||||
|
|
||||||
def _parse_polyline(self, poly):
|
def _parse_polyline(self, poly):
|
||||||
if 'points' in poly.attrib:
|
if "points" in poly.attrib:
|
||||||
self._start_path('M' + poly.attrib['points'])
|
self._start_path("M" + poly.attrib["points"])
|
||||||
|
|
||||||
def _parse_circle(self, circle):
|
def _parse_circle(self, circle):
|
||||||
cx = float(circle.attrib.get('cx', 0))
|
cx = float(circle.attrib.get("cx", 0))
|
||||||
cy = float(circle.attrib.get('cy', 0))
|
cy = float(circle.attrib.get("cy", 0))
|
||||||
r = float(circle.attrib.get('r'))
|
r = float(circle.attrib.get("r"))
|
||||||
|
|
||||||
# arc doesn't seem to like being a complete shape, draw two halves
|
# arc doesn't seem to like being a complete shape, draw two halves
|
||||||
self._start_path()
|
self._start_path()
|
||||||
@ -159,10 +161,10 @@ class PathBuilder(object):
|
|||||||
self.A(r, r, cx - r, cy, large_arc=1)
|
self.A(r, r, cx - r, cy, large_arc=1)
|
||||||
|
|
||||||
def _parse_ellipse(self, ellipse):
|
def _parse_ellipse(self, ellipse):
|
||||||
cx = float(ellipse.attrib.get('cx', 0))
|
cx = float(ellipse.attrib.get("cx", 0))
|
||||||
cy = float(ellipse.attrib.get('cy', 0))
|
cy = float(ellipse.attrib.get("cy", 0))
|
||||||
rx = float(ellipse.attrib.get('rx'))
|
rx = float(ellipse.attrib.get("rx"))
|
||||||
ry = float(ellipse.attrib.get('ry'))
|
ry = float(ellipse.attrib.get("ry"))
|
||||||
|
|
||||||
# arc doesn't seem to like being a complete shape, draw two halves
|
# arc doesn't seem to like being a complete shape, draw two halves
|
||||||
self._start_path()
|
self._start_path()
|
||||||
@ -172,10 +174,10 @@ class PathBuilder(object):
|
|||||||
|
|
||||||
def add_path_from_element(self, el):
|
def add_path_from_element(self, el):
|
||||||
tag = _strip_xml_ns(el.tag)
|
tag = _strip_xml_ns(el.tag)
|
||||||
parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
|
parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
|
||||||
if not callable(parse_fn):
|
if not callable(parse_fn):
|
||||||
return False
|
return False
|
||||||
parse_fn(el)
|
parse_fn(el)
|
||||||
if 'transform' in el.attrib:
|
if "transform" in el.attrib:
|
||||||
self.transforms[-1] = _transform(el.attrib['transform'])
|
self.transforms[-1] = _transform(el.attrib["transform"])
|
||||||
return True
|
return True
|
||||||
|
@ -19,7 +19,11 @@ import fontTools
|
|||||||
from fontTools.misc import eexec
|
from fontTools.misc import eexec
|
||||||
from fontTools.misc.macCreatorType import getMacCreatorAndType
|
from fontTools.misc.macCreatorType import getMacCreatorAndType
|
||||||
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
|
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
|
||||||
from fontTools.misc.psOperators import _type1_pre_eexec_order, _type1_fontinfo_order, _type1_post_eexec_order
|
from fontTools.misc.psOperators import (
|
||||||
|
_type1_pre_eexec_order,
|
||||||
|
_type1_fontinfo_order,
|
||||||
|
_type1_post_eexec_order,
|
||||||
|
)
|
||||||
from fontTools.encodings.StandardEncoding import StandardEncoding
|
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
@ -40,7 +44,8 @@ else:
|
|||||||
haveMacSupport = 1
|
haveMacSupport = 1
|
||||||
|
|
||||||
|
|
||||||
class T1Error(Exception): pass
|
class T1Error(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class T1Font(object):
|
class T1Font(object):
|
||||||
@ -91,6 +96,7 @@ class T1Font(object):
|
|||||||
def parse(self):
|
def parse(self):
|
||||||
from fontTools.misc import psLib
|
from fontTools.misc import psLib
|
||||||
from fontTools.misc import psCharStrings
|
from fontTools.misc import psCharStrings
|
||||||
|
|
||||||
self.font = psLib.suckfont(self.data, self.encoding)
|
self.font = psLib.suckfont(self.data, self.encoding)
|
||||||
charStrings = self.font["CharStrings"]
|
charStrings = self.font["CharStrings"]
|
||||||
lenIV = self.font["Private"].get("lenIV", 4)
|
lenIV = self.font["Private"].get("lenIV", 4)
|
||||||
@ -98,8 +104,9 @@ class T1Font(object):
|
|||||||
subrs = self.font["Private"]["Subrs"]
|
subrs = self.font["Private"]["Subrs"]
|
||||||
for glyphName, charString in charStrings.items():
|
for glyphName, charString in charStrings.items():
|
||||||
charString, R = eexec.decrypt(charString, 4330)
|
charString, R = eexec.decrypt(charString, 4330)
|
||||||
charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
|
charStrings[glyphName] = psCharStrings.T1CharString(
|
||||||
subrs=subrs)
|
charString[lenIV:], subrs=subrs
|
||||||
|
)
|
||||||
for i in range(len(subrs)):
|
for i in range(len(subrs)):
|
||||||
charString, R = eexec.decrypt(subrs[i], 4330)
|
charString, R = eexec.decrypt(subrs[i], 4330)
|
||||||
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
|
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
|
||||||
@ -111,9 +118,13 @@ class T1Font(object):
|
|||||||
eexec_began = False
|
eexec_began = False
|
||||||
eexec_dict = {}
|
eexec_dict = {}
|
||||||
lines = []
|
lines = []
|
||||||
lines.extend([self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
|
lines.extend(
|
||||||
|
[
|
||||||
|
self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
|
||||||
self._tobytes(f"%t1Font: ({fontTools.version})"),
|
self._tobytes(f"%t1Font: ({fontTools.version})"),
|
||||||
self._tobytes(f"%%BeginResource: font {sf['FontName']}")])
|
self._tobytes(f"%%BeginResource: font {sf['FontName']}"),
|
||||||
|
]
|
||||||
|
)
|
||||||
# follow t1write.c:writeRegNameKeyedFont
|
# follow t1write.c:writeRegNameKeyedFont
|
||||||
size = 3 # Headroom for new key addition
|
size = 3 # Headroom for new key addition
|
||||||
size += 1 # FontMatrix is always counted
|
size += 1 # FontMatrix is always counted
|
||||||
@ -149,9 +160,7 @@ class T1Font(object):
|
|||||||
|
|
||||||
for _ in range(8):
|
for _ in range(8):
|
||||||
lines.append(self._tobytes("0" * 64))
|
lines.append(self._tobytes("0" * 64))
|
||||||
lines.extend([b"cleartomark",
|
lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"])
|
||||||
b"%%EndResource",
|
|
||||||
b"%%EOF"])
|
|
||||||
|
|
||||||
data = bytesjoin(lines, "\n")
|
data = bytesjoin(lines, "\n")
|
||||||
return data
|
return data
|
||||||
@ -179,35 +188,65 @@ class T1Font(object):
|
|||||||
elif not NP_key and subvalue == PD_value:
|
elif not NP_key and subvalue == PD_value:
|
||||||
NP_key = subkey
|
NP_key = subkey
|
||||||
|
|
||||||
if subkey == 'OtherSubrs':
|
if subkey == "OtherSubrs":
|
||||||
# XXX: assert that no flex hint is used
|
# XXX: assert that no flex hint is used
|
||||||
lines.append(self._tobytes(hintothers))
|
lines.append(self._tobytes(hintothers))
|
||||||
elif subkey == "Subrs":
|
elif subkey == "Subrs":
|
||||||
# XXX: standard Subrs only
|
# XXX: standard Subrs only
|
||||||
lines.append(b"/Subrs 5 array")
|
lines.append(b"/Subrs 5 array")
|
||||||
for i, subr_bin in enumerate(std_subrs):
|
for i, subr_bin in enumerate(std_subrs):
|
||||||
encrypted_subr, R = eexec.encrypt(bytesjoin([char_IV, subr_bin]), 4330)
|
encrypted_subr, R = eexec.encrypt(
|
||||||
lines.append(bytesjoin([self._tobytes(f"dup {i} {len(encrypted_subr)} {RD_key} "), encrypted_subr, self._tobytes(f" {NP_key}")]))
|
bytesjoin([char_IV, subr_bin]), 4330
|
||||||
lines.append(b'def')
|
)
|
||||||
|
lines.append(
|
||||||
|
bytesjoin(
|
||||||
|
[
|
||||||
|
self._tobytes(
|
||||||
|
f"dup {i} {len(encrypted_subr)} {RD_key} "
|
||||||
|
),
|
||||||
|
encrypted_subr,
|
||||||
|
self._tobytes(f" {NP_key}"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
lines.append(b"def")
|
||||||
|
|
||||||
lines.append(b"put")
|
lines.append(b"put")
|
||||||
else:
|
else:
|
||||||
lines.extend(self._make_lines(subkey, subvalue))
|
lines.extend(self._make_lines(subkey, subvalue))
|
||||||
elif key == "CharStrings":
|
elif key == "CharStrings":
|
||||||
lines.append(b"dup /CharStrings")
|
lines.append(b"dup /CharStrings")
|
||||||
lines.append(self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin"))
|
lines.append(
|
||||||
|
self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin")
|
||||||
|
)
|
||||||
for glyph_name, char_bin in eexec_dict["CharStrings"].items():
|
for glyph_name, char_bin in eexec_dict["CharStrings"].items():
|
||||||
char_bin.compile()
|
char_bin.compile()
|
||||||
encrypted_char, R = eexec.encrypt(bytesjoin([char_IV, char_bin.bytecode]), 4330)
|
encrypted_char, R = eexec.encrypt(
|
||||||
lines.append(bytesjoin([self._tobytes(f"/{glyph_name} {len(encrypted_char)} {RD_key} "), encrypted_char, self._tobytes(f" {ND_key}")]))
|
bytesjoin([char_IV, char_bin.bytecode]), 4330
|
||||||
|
)
|
||||||
|
lines.append(
|
||||||
|
bytesjoin(
|
||||||
|
[
|
||||||
|
self._tobytes(
|
||||||
|
f"/{glyph_name} {len(encrypted_char)} {RD_key} "
|
||||||
|
),
|
||||||
|
encrypted_char,
|
||||||
|
self._tobytes(f" {ND_key}"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
)
|
||||||
lines.append(b"end put")
|
lines.append(b"end put")
|
||||||
else:
|
else:
|
||||||
lines.extend(self._make_lines(key, value))
|
lines.extend(self._make_lines(key, value))
|
||||||
|
|
||||||
lines.extend([b"end",
|
lines.extend(
|
||||||
|
[
|
||||||
|
b"end",
|
||||||
b"dup /FontName get exch definefont pop",
|
b"dup /FontName get exch definefont pop",
|
||||||
b"mark",
|
b"mark",
|
||||||
b"currentfile closefile\n"])
|
b"currentfile closefile\n",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
eexec_portion = bytesjoin(lines, "\n")
|
eexec_portion = bytesjoin(lines, "\n")
|
||||||
encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
|
encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
|
||||||
@ -250,19 +289,21 @@ class T1Font(object):
|
|||||||
|
|
||||||
# low level T1 data read and write functions
|
# low level T1 data read and write functions
|
||||||
|
|
||||||
|
|
||||||
def read(path, onlyHeader=False):
|
def read(path, onlyHeader=False):
|
||||||
"""reads any Type 1 font file, returns raw data"""
|
"""reads any Type 1 font file, returns raw data"""
|
||||||
_, ext = os.path.splitext(path)
|
_, ext = os.path.splitext(path)
|
||||||
ext = ext.lower()
|
ext = ext.lower()
|
||||||
creator, typ = getMacCreatorAndType(path)
|
creator, typ = getMacCreatorAndType(path)
|
||||||
if typ == 'LWFN':
|
if typ == "LWFN":
|
||||||
return readLWFN(path, onlyHeader), 'LWFN'
|
return readLWFN(path, onlyHeader), "LWFN"
|
||||||
if ext == '.pfb':
|
if ext == ".pfb":
|
||||||
return readPFB(path, onlyHeader), 'PFB'
|
return readPFB(path, onlyHeader), "PFB"
|
||||||
else:
|
else:
|
||||||
return readOther(path), 'OTHER'
|
return readOther(path), "OTHER"
|
||||||
|
|
||||||
def write(path, data, kind='OTHER', dohex=False):
|
|
||||||
|
def write(path, data, kind="OTHER", dohex=False):
|
||||||
assertType1(data)
|
assertType1(data)
|
||||||
kind = kind.upper()
|
kind = kind.upper()
|
||||||
try:
|
try:
|
||||||
@ -271,9 +312,9 @@ def write(path, data, kind='OTHER', dohex=False):
|
|||||||
pass
|
pass
|
||||||
err = 1
|
err = 1
|
||||||
try:
|
try:
|
||||||
if kind == 'LWFN':
|
if kind == "LWFN":
|
||||||
writeLWFN(path, data)
|
writeLWFN(path, data)
|
||||||
elif kind == 'PFB':
|
elif kind == "PFB":
|
||||||
writePFB(path, data)
|
writePFB(path, data)
|
||||||
else:
|
else:
|
||||||
writeOther(path, data, dohex)
|
writeOther(path, data, dohex)
|
||||||
@ -295,13 +336,14 @@ HEXLINELENGTH = 80
|
|||||||
def readLWFN(path, onlyHeader=False):
|
def readLWFN(path, onlyHeader=False):
|
||||||
"""reads an LWFN font file, returns raw data"""
|
"""reads an LWFN font file, returns raw data"""
|
||||||
from fontTools.misc.macRes import ResourceReader
|
from fontTools.misc.macRes import ResourceReader
|
||||||
|
|
||||||
reader = ResourceReader(path)
|
reader = ResourceReader(path)
|
||||||
try:
|
try:
|
||||||
data = []
|
data = []
|
||||||
for res in reader.get('POST', []):
|
for res in reader.get("POST", []):
|
||||||
code = byteord(res.data[0])
|
code = byteord(res.data[0])
|
||||||
if byteord(res.data[1]) != 0:
|
if byteord(res.data[1]) != 0:
|
||||||
raise T1Error('corrupt LWFN file')
|
raise T1Error("corrupt LWFN file")
|
||||||
if code in [1, 2]:
|
if code in [1, 2]:
|
||||||
if onlyHeader and code == 2:
|
if onlyHeader and code == 2:
|
||||||
break
|
break
|
||||||
@ -314,20 +356,21 @@ def readLWFN(path, onlyHeader=False):
|
|||||||
elif code == 0:
|
elif code == 0:
|
||||||
pass # comment, ignore
|
pass # comment, ignore
|
||||||
else:
|
else:
|
||||||
raise T1Error('bad chunk code: ' + repr(code))
|
raise T1Error("bad chunk code: " + repr(code))
|
||||||
finally:
|
finally:
|
||||||
reader.close()
|
reader.close()
|
||||||
data = bytesjoin(data)
|
data = bytesjoin(data)
|
||||||
assertType1(data)
|
assertType1(data)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def readPFB(path, onlyHeader=False):
|
def readPFB(path, onlyHeader=False):
|
||||||
"""reads a PFB font file, returns raw data"""
|
"""reads a PFB font file, returns raw data"""
|
||||||
data = []
|
data = []
|
||||||
with open(path, "rb") as f:
|
with open(path, "rb") as f:
|
||||||
while True:
|
while True:
|
||||||
if f.read(1) != bytechr(128):
|
if f.read(1) != bytechr(128):
|
||||||
raise T1Error('corrupt PFB file')
|
raise T1Error("corrupt PFB file")
|
||||||
code = byteord(f.read(1))
|
code = byteord(f.read(1))
|
||||||
if code in [1, 2]:
|
if code in [1, 2]:
|
||||||
chunklen = stringToLong(f.read(4))
|
chunklen = stringToLong(f.read(4))
|
||||||
@ -337,13 +380,14 @@ def readPFB(path, onlyHeader=False):
|
|||||||
elif code == 3:
|
elif code == 3:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
raise T1Error('bad chunk code: ' + repr(code))
|
raise T1Error("bad chunk code: " + repr(code))
|
||||||
if onlyHeader:
|
if onlyHeader:
|
||||||
break
|
break
|
||||||
data = bytesjoin(data)
|
data = bytesjoin(data)
|
||||||
assertType1(data)
|
assertType1(data)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
 def readOther(path):
     """reads any (font) file, returns raw data"""
     with open(path, "rb") as f:
@@ -358,8 +402,10 @@ def readOther(path):
         data.append(chunk)
     return bytesjoin(data)


 # file writing tools


 def writeLWFN(path, data):
     # Res.FSpCreateResFile was deprecated in OS X 10.5
     Res.FSpCreateResFile(path, "just", "LWFN", 0)
@@ -374,15 +420,16 @@ def writeLWFN(path, data):
             else:
                 code = 1
             while chunk:
-                res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
+                res = Res.Resource(bytechr(code) + "\0" + chunk[: LWFNCHUNKSIZE - 2])
-                res.AddResource('POST', resID, '')
+                res.AddResource("POST", resID, "")
                 chunk = chunk[LWFNCHUNKSIZE - 2 :]
                 resID = resID + 1
-        res = Res.Resource(bytechr(5) + '\0')
+        res = Res.Resource(bytechr(5) + "\0")
-        res.AddResource('POST', resID, '')
+        res.AddResource("POST", resID, "")
     finally:
         Res.CloseResFile(resRef)


 def writePFB(path, data):
     chunks = findEncryptedChunks(data)
     with open(path, "wb") as f:
@@ -396,6 +443,7 @@ def writePFB(path, data):
             f.write(chunk)
         f.write(bytechr(128) + bytechr(3))


 def writeOther(path, data, dohex=False):
     chunks = findEncryptedChunks(data)
     with open(path, "wb") as f:
@@ -408,7 +456,7 @@ def writeOther(path, data, dohex=False):
             if code == 2 and dohex:
                 while chunk:
                     f.write(eexec.hexString(chunk[:hexlinelen]))
-                    f.write(b'\r')
+                    f.write(b"\r")
                     chunk = chunk[hexlinelen:]
             else:
                 f.write(chunk)
@@ -419,12 +467,13 @@ def writeOther(path, data, dohex=False):
 EEXECBEGIN = b"currentfile eexec"
 # The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
 # follow eexec
-EEXECEND = re.compile(b'(0[ \t\r\n]*){512}', flags=re.M)
+EEXECEND = re.compile(b"(0[ \t\r\n]*){512}", flags=re.M)
 EEXECINTERNALEND = b"currentfile closefile"
 EEXECBEGINMARKER = b"%-- eexec start\r"
 EEXECENDMARKER = b"%-- eexec end\r"

-_ishexRE = re.compile(b'[0-9A-Fa-f]*$')
+_ishexRE = re.compile(b"[0-9A-Fa-f]*$")


 def isHex(text):
     return _ishexRE.match(text) is not None
@@ -439,10 +488,12 @@ def decryptType1(data):
             chunk = deHexString(chunk)
             decrypted, R = eexec.decrypt(chunk, 55665)
             decrypted = decrypted[4:]
-            if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
-                and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
+            if (
+                decrypted[-len(EEXECINTERNALEND) - 1 : -1] != EEXECINTERNALEND
+                and decrypted[-len(EEXECINTERNALEND) - 2 : -2] != EEXECINTERNALEND
+            ):
                 raise T1Error("invalid end of eexec part")
-            decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
+            decrypted = decrypted[: -len(EEXECINTERNALEND) - 2] + b"\r"
             data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
         else:
             if chunk[-len(EEXECBEGIN) - 1 : -1] == EEXECBEGIN:
@@ -451,6 +502,7 @@ def decryptType1(data):
         data.append(chunk)
     return bytesjoin(data)


 def findEncryptedChunks(data):
     chunks = []
     while True:
@@ -475,16 +527,18 @@ def findEncryptedChunks(data):
     chunks.append((0, data))
     return chunks


 def deHexString(hexstring):
     return eexec.deHexString(bytesjoin(hexstring.split()))


 # Type 1 assertion

-_fontType1RE = re.compile(br"/FontType\s+1\s+def")
+_fontType1RE = re.compile(rb"/FontType\s+1\s+def")


 def assertType1(data):
-    for head in [b'%!PS-AdobeFont', b'%!FontType1']:
+    for head in [b"%!PS-AdobeFont", b"%!FontType1"]:
         if data[: len(head)] == head:
             break
     else:
@@ -499,15 +553,17 @@ def assertType1(data):

 # pfb helpers


 def longToString(long):
     s = b""
     for i in range(4):
-        s += bytechr((long & (0xff << (i * 8))) >> i * 8)
+        s += bytechr((long & (0xFF << (i * 8))) >> i * 8)
     return s


 def stringToLong(s):
     if len(s) != 4:
-        raise ValueError('string must be 4 bytes long')
+        raise ValueError("string must be 4 bytes long")
     l = 0
     for i in range(4):
         l += byteord(s[i]) << (i * 8)
@@ -523,10 +579,12 @@ font_dictionary_keys.remove("FontMatrix")

 FontInfo_dictionary_keys = list(_type1_fontinfo_order)
 # extend because AFDKO tx may use following keys
-FontInfo_dictionary_keys.extend([
+FontInfo_dictionary_keys.extend(
+    [
         "FSType",
         "Copyright",
-])
+    ]
+)

 Private_dictionary_keys = [
     # We don't know what names will be actually used.
@@ -570,7 +628,7 @@ std_subrs = [
     # return
     b"\x0b",
     # 3 1 3 callother pop callsubr return
-    b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b"
+    b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b",
 ]
 # follow t1write.c:writeRegNameKeyedFont
 eexec_IV = b"cccc"

@@ -7,14 +7,22 @@ import sys

 log = logging.getLogger(__name__)

-class TTLibError(Exception): pass
-class TTLibFileIsCollectionError (TTLibError): pass
+
+class TTLibError(Exception):
+    pass
+
+
+class TTLibFileIsCollectionError(TTLibError):
+    pass


 @deprecateFunction("use logging instead", category=DeprecationWarning)
 def debugmsg(msg):
     import time

     print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))


 from fontTools.ttLib.ttFont import *
 from fontTools.ttLib.ttCollection import TTCollection

@@ -62,7 +70,7 @@ def main(args=None):
         allows for extracting a single font from a
         collection, or combining multiple fonts into a
         collection.
-        """
+        """,
     )
     parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
     parser.add_argument(
@@ -100,5 +108,6 @@ def main(args=None):
         collection.fonts = fonts
         collection.save(outFile)


 if __name__ == "__main__":
     sys.exit(main())

@@ -7,7 +7,7 @@ def getSFNTResIndices(path):
     """Determine whether a file has a 'sfnt' resource fork or not."""
     try:
         reader = ResourceReader(path)
-        indices = reader.getIndices('sfnt')
+        indices = reader.getIndices("sfnt")
         reader.close()
         return indices
     except ResourceError:
@@ -21,6 +21,7 @@ def openTTFonts(path):
     font objects as there are sfnt resources in the file.
     """
     from fontTools import ttLib

     fonts = []
     sfnts = getSFNTResIndices(path)
     if not sfnts:
@@ -39,11 +40,12 @@ class SFNTResourceReader(BytesIO):

     def __init__(self, path, res_name_or_index):
         from fontTools import ttLib

         reader = ResourceReader(path)
         if isinstance(res_name_or_index, str):
-            rsrc = reader.getNamedResource('sfnt', res_name_or_index)
+            rsrc = reader.getNamedResource("sfnt", res_name_or_index)
         else:
-            rsrc = reader.getIndResource('sfnt', res_name_or_index)
+            rsrc = reader.getIndResource("sfnt", res_name_or_index)
         if rsrc is None:
             raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
         reader.close()

@@ -24,8 +24,8 @@ import logging

 log = logging.getLogger(__name__)

-class SFNTReader(object):
+

+class SFNTReader(object):
     def __new__(cls, *args, **kwargs):
         """Return an instance of the SFNTReader sub-class which is compatible
         with the input file type.
@@ -38,6 +38,7 @@ class SFNTReader(object):
         if sfntVersion == "wOF2":
             # return new WOFF2Reader object
             from fontTools.ttLib.woff2 import WOFF2Reader

             return object.__new__(WOFF2Reader)
         # return default object
         return object.__new__(cls)
@ -56,7 +57,10 @@ class SFNTReader(object):
|
|||||||
header = readTTCHeader(self.file)
|
header = readTTCHeader(self.file)
|
||||||
numFonts = header.numFonts
|
numFonts = header.numFonts
|
||||||
if not 0 <= fontNumber < numFonts:
|
if not 0 <= fontNumber < numFonts:
|
||||||
raise TTLibFileIsCollectionError("specify a font number between 0 and %d (inclusive)" % (numFonts - 1))
|
raise TTLibFileIsCollectionError(
|
||||||
|
"specify a font number between 0 and %d (inclusive)"
|
||||||
|
% (numFonts - 1)
|
||||||
|
)
|
||||||
self.numFonts = numFonts
|
self.numFonts = numFonts
|
||||||
self.file.seek(header.offsetTable[fontNumber])
|
self.file.seek(header.offsetTable[fontNumber])
|
||||||
data = self.file.read(sfntDirectorySize)
|
data = self.file.read(sfntDirectorySize)
|
||||||
@ -104,9 +108,9 @@ class SFNTReader(object):
|
|||||||
entry = self.tables[Tag(tag)]
|
entry = self.tables[Tag(tag)]
|
||||||
data = entry.loadData(self.file)
|
data = entry.loadData(self.file)
|
||||||
if self.checkChecksums:
|
if self.checkChecksums:
|
||||||
if tag == 'head':
|
if tag == "head":
|
||||||
# Beh: we have to special-case the 'head' table.
|
# Beh: we have to special-case the 'head' table.
|
||||||
checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
|
checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
|
||||||
else:
|
else:
|
||||||
checksum = calcChecksum(data)
|
checksum = calcChecksum(data)
|
||||||
if self.checkChecksums > 1:
|
if self.checkChecksums > 1:
|
||||||
@ -179,36 +183,44 @@ def compress(data, level=ZLIB_COMPRESSION_LEVEL):
|
|||||||
The default value is a compromise between speed and compression (6).
|
The default value is a compromise between speed and compression (6).
|
||||||
"""
|
"""
|
||||||
if not (0 <= level <= 9):
|
if not (0 <= level <= 9):
|
||||||
raise ValueError('Bad compression level: %s' % level)
|
raise ValueError("Bad compression level: %s" % level)
|
||||||
if not USE_ZOPFLI or level == 0:
|
if not USE_ZOPFLI or level == 0:
|
||||||
from zlib import compress
|
from zlib import compress
|
||||||
|
|
||||||
return compress(data, level)
|
return compress(data, level)
|
||||||
else:
|
else:
|
||||||
from zopfli.zlib import compress
|
from zopfli.zlib import compress
|
||||||
|
|
||||||
return compress(data, numiterations=ZOPFLI_LEVELS[level])
|
return compress(data, numiterations=ZOPFLI_LEVELS[level])
|
||||||
|
|
||||||
|
|
||||||
class SFNTWriter(object):
|
class SFNTWriter(object):
|
||||||
|
|
||||||
def __new__(cls, *args, **kwargs):
|
def __new__(cls, *args, **kwargs):
|
||||||
"""Return an instance of the SFNTWriter sub-class which is compatible
|
"""Return an instance of the SFNTWriter sub-class which is compatible
|
||||||
with the specified 'flavor'.
|
with the specified 'flavor'.
|
||||||
"""
|
"""
|
||||||
flavor = None
|
flavor = None
|
||||||
if kwargs and 'flavor' in kwargs:
|
if kwargs and "flavor" in kwargs:
|
||||||
flavor = kwargs['flavor']
|
flavor = kwargs["flavor"]
|
||||||
elif args and len(args) > 3:
|
elif args and len(args) > 3:
|
||||||
flavor = args[3]
|
flavor = args[3]
|
||||||
if cls is SFNTWriter:
|
if cls is SFNTWriter:
|
||||||
if flavor == "woff2":
|
if flavor == "woff2":
|
||||||
# return new WOFF2Writer object
|
# return new WOFF2Writer object
|
||||||
from fontTools.ttLib.woff2 import WOFF2Writer
|
from fontTools.ttLib.woff2 import WOFF2Writer
|
||||||
|
|
||||||
return object.__new__(WOFF2Writer)
|
return object.__new__(WOFF2Writer)
|
||||||
# return default object
|
# return default object
|
||||||
return object.__new__(cls)
|
return object.__new__(cls)
|
||||||
|
|
||||||
def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
|
def __init__(
|
||||||
flavor=None, flavorData=None):
|
self,
|
||||||
|
file,
|
||||||
|
numTables,
|
||||||
|
sfntVersion="\000\001\000\000",
|
||||||
|
flavor=None,
|
||||||
|
flavorData=None,
|
||||||
|
):
|
||||||
self.file = file
|
self.file = file
|
||||||
self.numTables = numTables
|
self.numTables = numTables
|
||||||
self.sfntVersion = Tag(sfntVersion)
|
self.sfntVersion = Tag(sfntVersion)
|
||||||
@ -223,7 +235,9 @@ class SFNTWriter(object):
|
|||||||
self.signature = "wOFF"
|
self.signature = "wOFF"
|
||||||
|
|
||||||
# to calculate WOFF checksum adjustment, we also need the original SFNT offsets
|
# to calculate WOFF checksum adjustment, we also need the original SFNT offsets
|
||||||
self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize
|
self.origNextTableOffset = (
|
||||||
|
sfntDirectorySize + numTables * sfntDirectoryEntrySize
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
assert not self.flavor, "Unknown flavor '%s'" % self.flavor
|
assert not self.flavor, "Unknown flavor '%s'" % self.flavor
|
||||||
self.directoryFormat = sfntDirectoryFormat
|
self.directoryFormat = sfntDirectoryFormat
|
||||||
@ -231,14 +245,21 @@ class SFNTWriter(object):
|
|||||||
self.DirectoryEntry = SFNTDirectoryEntry
|
self.DirectoryEntry = SFNTDirectoryEntry
|
||||||
|
|
||||||
from fontTools.ttLib import getSearchRange
|
from fontTools.ttLib import getSearchRange
|
||||||
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16)
|
|
||||||
|
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
|
||||||
|
numTables, 16
|
||||||
|
)
|
||||||
|
|
||||||
self.directoryOffset = self.file.tell()
|
self.directoryOffset = self.file.tell()
|
||||||
self.nextTableOffset = self.directoryOffset + self.directorySize + numTables * self.DirectoryEntry.formatSize
|
self.nextTableOffset = (
|
||||||
|
self.directoryOffset
|
||||||
|
+ self.directorySize
|
||||||
|
+ numTables * self.DirectoryEntry.formatSize
|
||||||
|
)
|
||||||
# clear out directory area
|
# clear out directory area
|
||||||
self.file.seek(self.nextTableOffset)
|
self.file.seek(self.nextTableOffset)
|
||||||
# make sure we're actually where we want to be. (old cStringIO bug)
|
# make sure we're actually where we want to be. (old cStringIO bug)
|
||||||
self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
|
self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
|
||||||
self.tables = OrderedDict()
|
self.tables = OrderedDict()
|
||||||
|
|
||||||
def setEntry(self, tag, entry):
|
def setEntry(self, tag, entry):
|
||||||
@ -255,8 +276,8 @@ class SFNTWriter(object):
|
|||||||
entry = self.DirectoryEntry()
|
entry = self.DirectoryEntry()
|
||||||
entry.tag = tag
|
entry.tag = tag
|
||||||
entry.offset = self.nextTableOffset
|
entry.offset = self.nextTableOffset
|
||||||
if tag == 'head':
|
if tag == "head":
|
||||||
entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
|
entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
|
||||||
self.headTable = data
|
self.headTable = data
|
||||||
entry.uncompressed = True
|
entry.uncompressed = True
|
||||||
else:
|
else:
|
||||||
@ -272,7 +293,7 @@ class SFNTWriter(object):
|
|||||||
# Don't depend on f.seek() as we need to add the padding even if no
|
# Don't depend on f.seek() as we need to add the padding even if no
|
||||||
# subsequent write follows (seek is lazy), ie. after the final table
|
# subsequent write follows (seek is lazy), ie. after the final table
|
||||||
# in the font.
|
# in the font.
|
||||||
self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
|
self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
|
||||||
assert self.nextTableOffset == self.file.tell()
|
assert self.nextTableOffset == self.file.tell()
|
||||||
|
|
||||||
self.setEntry(tag, entry)
|
self.setEntry(tag, entry)
|
||||||
@ -286,7 +307,10 @@ class SFNTWriter(object):
|
|||||||
"""
|
"""
|
||||||
tables = sorted(self.tables.items())
|
tables = sorted(self.tables.items())
|
||||||
if len(tables) != self.numTables:
|
if len(tables) != self.numTables:
|
||||||
raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)))
|
raise TTLibError(
|
||||||
|
"wrong number of tables; expected %d, found %d"
|
||||||
|
% (self.numTables, len(tables))
|
||||||
|
)
|
||||||
|
|
||||||
if self.flavor == "woff":
|
if self.flavor == "woff":
|
||||||
self.signature = b"wOFF"
|
self.signature = b"wOFF"
|
||||||
@ -302,8 +326,10 @@ class SFNTWriter(object):
|
|||||||
self.majorVersion = data.majorVersion
|
self.majorVersion = data.majorVersion
|
||||||
self.minorVersion = data.minorVersion
|
self.minorVersion = data.minorVersion
|
||||||
else:
|
else:
|
||||||
if hasattr(self, 'headTable'):
|
if hasattr(self, "headTable"):
|
||||||
self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8])
|
self.majorVersion, self.minorVersion = struct.unpack(
|
||||||
|
">HH", self.headTable[4:8]
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.majorVersion = self.minorVersion = 0
|
self.majorVersion = self.minorVersion = 0
|
||||||
if data.metaData:
|
if data.metaData:
|
||||||
@ -319,7 +345,7 @@ class SFNTWriter(object):
|
|||||||
self.file.seek(0, 2)
|
self.file.seek(0, 2)
|
||||||
off = self.file.tell()
|
off = self.file.tell()
|
||||||
paddedOff = (off + 3) & ~3
|
paddedOff = (off + 3) & ~3
|
||||||
self.file.write('\0' * (paddedOff - off))
|
self.file.write("\0" * (paddedOff - off))
|
||||||
self.privOffset = self.file.tell()
|
self.privOffset = self.file.tell()
|
||||||
self.privLength = len(data.privData)
|
self.privLength = len(data.privData)
|
||||||
self.file.write(data.privData)
|
self.file.write(data.privData)
|
||||||
@ -356,7 +382,10 @@ class SFNTWriter(object):
|
|||||||
if self.DirectoryEntry != SFNTDirectoryEntry:
|
if self.DirectoryEntry != SFNTDirectoryEntry:
|
||||||
# Create a SFNT directory for checksum calculation purposes
|
# Create a SFNT directory for checksum calculation purposes
|
||||||
from fontTools.ttLib import getSearchRange
|
from fontTools.ttLib import getSearchRange
|
||||||
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
|
|
||||||
|
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
|
||||||
|
self.numTables, 16
|
||||||
|
)
|
||||||
directory = sstruct.pack(sfntDirectoryFormat, self)
|
directory = sstruct.pack(sfntDirectoryFormat, self)
|
||||||
tables = sorted(self.tables.items())
|
tables = sorted(self.tables.items())
|
||||||
for tag, entry in tables:
|
for tag, entry in tables:
|
||||||
@ -371,15 +400,15 @@ class SFNTWriter(object):
|
|||||||
assert directory_end == len(directory)
|
assert directory_end == len(directory)
|
||||||
|
|
||||||
checksums.append(calcChecksum(directory))
|
checksums.append(calcChecksum(directory))
|
||||||
checksum = sum(checksums) & 0xffffffff
|
checksum = sum(checksums) & 0xFFFFFFFF
|
||||||
# BiboAfba!
|
# BiboAfba!
|
||||||
checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
|
checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
|
||||||
return checksumadjustment
|
return checksumadjustment
|
||||||
|
|
||||||
def writeMasterChecksum(self, directory):
|
def writeMasterChecksum(self, directory):
|
||||||
checksumadjustment = self._calcMasterChecksum(directory)
|
checksumadjustment = self._calcMasterChecksum(directory)
|
||||||
# write the checksum to the file
|
# write the checksum to the file
|
||||||
self.file.seek(self.tables['head'].offset + 8)
|
self.file.seek(self.tables["head"].offset + 8)
|
||||||
self.file.write(struct.pack(">L", checksumadjustment))
|
self.file.write(struct.pack(">L", checksumadjustment))
|
||||||
|
|
||||||
def reordersTables(self):
|
def reordersTables(self):
|
||||||
@ -454,7 +483,6 @@ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
|
|||||||
|
|
||||||
|
|
||||||
class DirectoryEntry(object):
|
class DirectoryEntry(object):
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.uncompressed = False # if True, always embed entry raw
|
self.uncompressed = False # if True, always embed entry raw
|
||||||
|
|
||||||
@ -477,12 +505,12 @@ class DirectoryEntry(object):
|
|||||||
file.seek(self.offset)
|
file.seek(self.offset)
|
||||||
data = file.read(self.length)
|
data = file.read(self.length)
|
||||||
assert len(data) == self.length
|
assert len(data) == self.length
|
||||||
if hasattr(self.__class__, 'decodeData'):
|
if hasattr(self.__class__, "decodeData"):
|
||||||
data = self.decodeData(data)
|
data = self.decodeData(data)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def saveData(self, file, data):
|
def saveData(self, file, data):
|
||||||
if hasattr(self.__class__, 'encodeData'):
|
if hasattr(self.__class__, "encodeData"):
|
||||||
data = self.encodeData(data)
|
data = self.encodeData(data)
|
||||||
self.length = len(data)
|
self.length = len(data)
|
||||||
file.seek(self.offset)
|
file.seek(self.offset)
|
||||||
@ -494,11 +522,13 @@ class DirectoryEntry(object):
|
|||||||
def encodeData(self, data):
|
def encodeData(self, data):
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class SFNTDirectoryEntry(DirectoryEntry):
|
class SFNTDirectoryEntry(DirectoryEntry):
|
||||||
|
|
||||||
format = sfntDirectoryEntryFormat
|
format = sfntDirectoryEntryFormat
|
||||||
formatSize = sfntDirectoryEntrySize
|
formatSize = sfntDirectoryEntrySize
|
||||||
|
|
||||||
|
|
||||||
class WOFFDirectoryEntry(DirectoryEntry):
|
class WOFFDirectoryEntry(DirectoryEntry):
|
||||||
|
|
||||||
format = woffDirectoryEntryFormat
|
format = woffDirectoryEntryFormat
|
||||||
@ -512,11 +542,12 @@ class WOFFDirectoryEntry(DirectoryEntry):
|
|||||||
# defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
|
# defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
|
||||||
# compressing the metadata. For backward compatibility, we still
|
# compressing the metadata. For backward compatibility, we still
|
||||||
# use the class attribute if it was already set.
|
# use the class attribute if it was already set.
|
||||||
if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'):
|
if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"):
|
||||||
self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
|
self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL
|
||||||
|
|
||||||
def decodeData(self, rawData):
|
def decodeData(self, rawData):
|
||||||
import zlib
|
import zlib
|
||||||
|
|
||||||
if self.length == self.origLength:
|
if self.length == self.origLength:
|
||||||
data = rawData
|
data = rawData
|
||||||
else:
|
else:
|
||||||
@ -538,9 +569,10 @@ class WOFFDirectoryEntry(DirectoryEntry):
|
|||||||
self.length = len(rawData)
|
self.length = len(rawData)
|
||||||
return rawData
|
return rawData
|
||||||
|
|
||||||
class WOFFFlavorData():
|
|
||||||
|
|
||||||
Flavor = 'woff'
|
class WOFFFlavorData:
|
||||||
|
|
||||||
|
Flavor = "woff"
|
||||||
|
|
||||||
def __init__(self, reader=None):
|
def __init__(self, reader=None):
|
||||||
self.majorVersion = None
|
self.majorVersion = None
|
||||||
@ -565,6 +597,7 @@ class WOFFFlavorData():
|
|||||||
|
|
||||||
def _decompress(self, rawData):
|
def _decompress(self, rawData):
|
||||||
import zlib
|
import zlib
|
||||||
|
|
||||||
return zlib.decompress(rawData)
|
return zlib.decompress(rawData)
|
||||||
|
|
||||||
|
|
||||||
@ -588,9 +621,10 @@ def calcChecksum(data):
|
|||||||
for i in range(0, len(data), blockSize):
|
for i in range(0, len(data), blockSize):
|
||||||
block = data[i : i + blockSize]
|
block = data[i : i + blockSize]
|
||||||
longs = struct.unpack(">%dL" % (len(block) // 4), block)
|
longs = struct.unpack(">%dL" % (len(block) // 4), block)
|
||||||
value = (value + sum(longs)) & 0xffffffff
|
value = (value + sum(longs)) & 0xFFFFFFFF
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
|
||||||
def readTTCHeader(file):
|
def readTTCHeader(file):
|
||||||
file.seek(0)
|
file.seek(0)
|
||||||
data = file.read(ttcHeaderSize)
|
data = file.read(ttcHeaderSize)
|
||||||
@ -600,15 +634,20 @@ def readTTCHeader(file):
|
|||||||
sstruct.unpack(ttcHeaderFormat, data, self)
|
sstruct.unpack(ttcHeaderFormat, data, self)
|
||||||
if self.TTCTag != "ttcf":
|
if self.TTCTag != "ttcf":
|
||||||
raise TTLibError("Not a Font Collection")
|
raise TTLibError("Not a Font Collection")
|
||||||
assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version
|
assert self.Version == 0x00010000 or self.Version == 0x00020000, (
|
||||||
self.offsetTable = struct.unpack(">%dL" % self.numFonts, file.read(self.numFonts * 4))
|
"unrecognized TTC version 0x%08x" % self.Version
|
||||||
|
)
|
||||||
|
self.offsetTable = struct.unpack(
|
||||||
|
">%dL" % self.numFonts, file.read(self.numFonts * 4)
|
||||||
|
)
|
||||||
if self.Version == 0x00020000:
|
if self.Version == 0x00020000:
|
||||||
pass # ignoring version 2.0 signatures
|
pass # ignoring version 2.0 signatures
|
||||||
return self
|
return self
|
||||||
|
|
||||||
|
|
||||||
def writeTTCHeader(file, numFonts):
|
def writeTTCHeader(file, numFonts):
|
||||||
self = SimpleNamespace()
|
self = SimpleNamespace()
|
||||||
self.TTCTag = 'ttcf'
|
self.TTCTag = "ttcf"
|
||||||
self.Version = 0x00010000
|
self.Version = 0x00010000
|
||||||
self.numFonts = numFonts
|
self.numFonts = numFonts
|
||||||
file.seek(0)
|
file.seek(0)
|
||||||
@ -617,7 +656,9 @@ def writeTTCHeader(file, numFonts):
|
|||||||
file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
|
file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
|
||||||
return offset
|
return offset
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
import doctest
|
import doctest
|
||||||
|
|
||||||
sys.exit(doctest.testmod().failed)
|
sys.exit(doctest.testmod().failed)
|
||||||
|
@@ -267,5 +267,5 @@ standardGlyphOrder = [
     "cacute",  # 254
     "Ccaron",  # 255
     "ccaron",  # 256
-    "dcroat"  # 257
+    "dcroat",  # 257
 ]

@ -28,8 +28,8 @@ smallGlyphMetricsFormat = """
|
|||||||
Advance: B
|
Advance: B
|
||||||
"""
|
"""
|
||||||
|
|
||||||
class BitmapGlyphMetrics(object):
|
|
||||||
|
|
||||||
|
class BitmapGlyphMetrics(object):
|
||||||
def toXML(self, writer, ttFont):
|
def toXML(self, writer, ttFont):
|
||||||
writer.begintag(self.__class__.__name__)
|
writer.begintag(self.__class__.__name__)
|
||||||
writer.newline()
|
writer.newline()
|
||||||
@ -47,13 +47,18 @@ class BitmapGlyphMetrics(object):
|
|||||||
name, attrs, content = element
|
name, attrs, content = element
|
||||||
# Make sure this is a metric that is needed by GlyphMetrics.
|
# Make sure this is a metric that is needed by GlyphMetrics.
|
||||||
if name in metricNames:
|
if name in metricNames:
|
||||||
vars(self)[name] = safeEval(attrs['value'])
|
vars(self)[name] = safeEval(attrs["value"])
|
||||||
else:
|
else:
|
||||||
log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
|
log.warning(
|
||||||
|
"unknown name '%s' being ignored in %s.",
|
||||||
|
name,
|
||||||
|
self.__class__.__name__,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class BigGlyphMetrics(BitmapGlyphMetrics):
|
class BigGlyphMetrics(BitmapGlyphMetrics):
|
||||||
binaryFormat = bigGlyphMetricsFormat
|
binaryFormat = bigGlyphMetricsFormat
|
||||||
|
|
||||||
|
|
||||||
class SmallGlyphMetrics(BitmapGlyphMetrics):
|
class SmallGlyphMetrics(BitmapGlyphMetrics):
|
||||||
binaryFormat = smallGlyphMetricsFormat
|
binaryFormat = smallGlyphMetricsFormat
|
||||||
|
@ -6,14 +6,24 @@
|
|||||||
from fontTools.misc.textTools import bytesjoin
|
from fontTools.misc.textTools import bytesjoin
|
||||||
from fontTools.misc import sstruct
|
from fontTools.misc import sstruct
|
||||||
from . import E_B_D_T_
|
from . import E_B_D_T_
|
||||||
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
|
from .BitmapGlyphMetrics import (
|
||||||
from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
|
BigGlyphMetrics,
|
||||||
|
bigGlyphMetricsFormat,
|
||||||
|
SmallGlyphMetrics,
|
||||||
|
smallGlyphMetricsFormat,
|
||||||
|
)
|
||||||
|
from .E_B_D_T_ import (
|
||||||
|
BitmapGlyph,
|
||||||
|
BitmapPlusSmallMetricsMixin,
|
||||||
|
BitmapPlusBigMetricsMixin,
|
||||||
|
)
|
||||||
import struct
|
import struct
|
||||||
|
|
||||||
|
|
||||||
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
|
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
|
||||||
|
|
||||||
# Change the data locator table being referenced.
|
# Change the data locator table being referenced.
|
||||||
locatorName = 'CBLC'
|
locatorName = "CBLC"
|
||||||
|
|
||||||
# Modify the format class accessor for color bitmap use.
|
# Modify the format class accessor for color bitmap use.
|
||||||
def getImageFormatClass(self, imageFormat):
|
def getImageFormatClass(self, imageFormat):
|
||||||
@ -22,20 +32,22 @@ class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
|
|||||||
except KeyError:
|
except KeyError:
|
||||||
return cbdt_bitmap_classes[imageFormat]
|
return cbdt_bitmap_classes[imageFormat]
|
||||||
|
|
||||||
|
|
||||||
# Helper method for removing export features not supported by color bitmaps.
|
# Helper method for removing export features not supported by color bitmaps.
|
||||||
# Write data in the parent class will default to raw if an option is unsupported.
|
# Write data in the parent class will default to raw if an option is unsupported.
|
||||||
def _removeUnsupportedForColor(dataFunctions):
|
def _removeUnsupportedForColor(dataFunctions):
|
||||||
dataFunctions = dict(dataFunctions)
|
dataFunctions = dict(dataFunctions)
|
||||||
del dataFunctions['row']
|
del dataFunctions["row"]
|
||||||
return dataFunctions
|
return dataFunctions
|
||||||
|
|
||||||
|
|
||||||
class ColorBitmapGlyph(BitmapGlyph):
|
class ColorBitmapGlyph(BitmapGlyph):
|
||||||
|
|
||||||
fileExtension = '.png'
|
fileExtension = ".png"
|
||||||
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
|
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
|
||||||
|
|
||||||
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
|
|
||||||
|
|
||||||
|
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
|
||||||
def decompile(self):
|
def decompile(self):
|
||||||
self.metrics = SmallGlyphMetrics()
|
self.metrics = SmallGlyphMetrics()
|
||||||
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
|
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
|
||||||
@ -53,8 +65,8 @@ class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
|
|||||||
dataList.append(self.imageData)
|
dataList.append(self.imageData)
|
||||||
return bytesjoin(dataList)
|
return bytesjoin(dataList)
|
||||||
|
|
||||||
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
|
|
||||||
|
|
||||||
|
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
|
||||||
def decompile(self):
|
def decompile(self):
|
||||||
self.metrics = BigGlyphMetrics()
|
self.metrics = BigGlyphMetrics()
|
||||||
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
|
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
|
||||||
@ -72,8 +84,8 @@ class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
|
|||||||
dataList.append(self.imageData)
|
dataList.append(self.imageData)
|
||||||
return bytesjoin(dataList)
|
return bytesjoin(dataList)
|
||||||
|
|
||||||
class cbdt_bitmap_format_19(ColorBitmapGlyph):
|
|
||||||
|
|
||||||
|
class cbdt_bitmap_format_19(ColorBitmapGlyph):
|
||||||
def decompile(self):
|
def decompile(self):
|
||||||
(dataLen,) = struct.unpack(">L", self.data[:4])
|
(dataLen,) = struct.unpack(">L", self.data[:4])
|
||||||
data = self.data[4:]
|
data = self.data[4:]
|
||||||
@ -84,6 +96,7 @@ class cbdt_bitmap_format_19(ColorBitmapGlyph):
|
|||||||
def compile(self, ttFont):
|
def compile(self, ttFont):
|
||||||
return struct.pack(">L", len(self.imageData)) + self.imageData
|
return struct.pack(">L", len(self.imageData)) + self.imageData
|
||||||
|
|
||||||
|
|
||||||
# Dict for CBDT extended formats.
|
# Dict for CBDT extended formats.
|
||||||
cbdt_bitmap_classes = {
|
cbdt_bitmap_classes = {
|
||||||
17: cbdt_bitmap_format_17,
|
17: cbdt_bitmap_format_17,
|
||||||
|
@@ -4,6 +4,7 @@

 from . import E_B_L_C_


 class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):

-    dependencies = ['CBDT']
+    dependencies = ["CBDT"]
|
@ -4,7 +4,6 @@ from . import DefaultTable
|
|||||||
|
|
||||||
|
|
||||||
class table_C_F_F_(DefaultTable.DefaultTable):
|
class table_C_F_F_(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
def __init__(self, tag=None):
|
def __init__(self, tag=None):
|
||||||
DefaultTable.DefaultTable.__init__(self, tag)
|
DefaultTable.DefaultTable.__init__(self, tag)
|
||||||
self.cff = cffLib.CFFFontSet()
|
self.cff = cffLib.CFFFontSet()
|
||||||
@ -28,6 +27,7 @@ class table_C_F_F_(DefaultTable.DefaultTable):
|
|||||||
def getGlyphOrder(self):
|
def getGlyphOrder(self):
|
||||||
if self._gaveGlyphOrder:
|
if self._gaveGlyphOrder:
|
||||||
from fontTools import ttLib
|
from fontTools import ttLib
|
||||||
|
|
||||||
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
|
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
|
||||||
self._gaveGlyphOrder = True
|
self._gaveGlyphOrder = True
|
||||||
return self.cff[self.cff.fontNames[0]].getGlyphOrder()
|
return self.cff[self.cff.fontNames[0]].getGlyphOrder()
|
||||||
|
@ -3,7 +3,6 @@ from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
|
|||||||
|
|
||||||
|
|
||||||
class table_C_F_F__2(table_C_F_F_):
|
class table_C_F_F__2(table_C_F_F_):
|
||||||
|
|
||||||
def decompile(self, data, otFont):
|
def decompile(self, data, otFont):
|
||||||
self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
|
self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
|
||||||
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
|
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
|
||||||
|
@ -26,13 +26,11 @@ class table_C_O_L_R_(DefaultTable.DefaultTable):
|
|||||||
baseGlyph = baseRec.BaseGlyph
|
baseGlyph = baseRec.BaseGlyph
|
||||||
firstLayerIndex = baseRec.FirstLayerIndex
|
firstLayerIndex = baseRec.FirstLayerIndex
|
||||||
numLayers = baseRec.NumLayers
|
numLayers = baseRec.NumLayers
|
||||||
assert (firstLayerIndex + numLayers <= numLayerRecords)
|
assert firstLayerIndex + numLayers <= numLayerRecords
|
||||||
layers = []
|
layers = []
|
||||||
for i in range(firstLayerIndex, firstLayerIndex + numLayers):
|
for i in range(firstLayerIndex, firstLayerIndex + numLayers):
|
||||||
layerRec = layerRecords[i]
|
layerRec = layerRecords[i]
|
||||||
layers.append(
|
layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
|
||||||
LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)
|
|
||||||
)
|
|
||||||
colorLayerLists[baseGlyph] = layers
|
colorLayerLists[baseGlyph] = layers
|
||||||
return colorLayerLists
|
return colorLayerLists
|
||||||
|
|
||||||
@ -142,8 +140,8 @@ class table_C_O_L_R_(DefaultTable.DefaultTable):
|
|||||||
def __delitem__(self, glyphName):
|
def __delitem__(self, glyphName):
|
||||||
del self.ColorLayers[glyphName]
|
del self.ColorLayers[glyphName]
|
||||||
|
|
||||||
class LayerRecord(object):
|
|
||||||
|
|
||||||
|
class LayerRecord(object):
|
||||||
def __init__(self, name=None, colorID=None):
|
def __init__(self, name=None, colorID=None):
|
||||||
self.name = name
|
self.name = name
|
||||||
self.colorID = colorID
|
self.colorID = colorID
|
||||||
|
@ -23,13 +23,21 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
self.paletteEntryLabels = []
|
self.paletteEntryLabels = []
|
||||||
|
|
||||||
def decompile(self, data, ttFont):
|
def decompile(self, data, ttFont):
|
||||||
self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12])
|
(
|
||||||
assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle"
|
self.version,
|
||||||
|
self.numPaletteEntries,
|
||||||
|
numPalettes,
|
||||||
|
numColorRecords,
|
||||||
|
goffsetFirstColorRecord,
|
||||||
|
) = struct.unpack(">HHHHL", data[:12])
|
||||||
|
assert (
|
||||||
|
self.version <= 1
|
||||||
|
), "Version of CPAL table is higher than I know how to handle"
|
||||||
self.palettes = []
|
self.palettes = []
|
||||||
pos = 12
|
pos = 12
|
||||||
for i in range(numPalettes):
|
for i in range(numPalettes):
|
||||||
startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
|
startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
|
||||||
assert (startIndex + self.numPaletteEntries <= numColorRecords)
|
assert startIndex + self.numPaletteEntries <= numColorRecords
|
||||||
pos += 2
|
pos += 2
|
||||||
palette = []
|
palette = []
|
||||||
ppos = goffsetFirstColorRecord + startIndex * 4
|
ppos = goffsetFirstColorRecord + startIndex * 4
|
||||||
@ -43,23 +51,33 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
offsetToPaletteEntryLabelArray = 0
|
offsetToPaletteEntryLabelArray = 0
|
||||||
else:
|
else:
|
||||||
pos = 12 + numPalettes * 2
|
pos = 12 + numPalettes * 2
|
||||||
(offsetToPaletteTypeArray, offsetToPaletteLabelArray,
|
(
|
||||||
offsetToPaletteEntryLabelArray) = (
|
offsetToPaletteTypeArray,
|
||||||
struct.unpack(">LLL", data[pos:pos+12]))
|
offsetToPaletteLabelArray,
|
||||||
|
offsetToPaletteEntryLabelArray,
|
||||||
|
) = struct.unpack(">LLL", data[pos : pos + 12])
|
||||||
self.paletteTypes = self._decompileUInt32Array(
|
self.paletteTypes = self._decompileUInt32Array(
|
||||||
data, offsetToPaletteTypeArray, numPalettes,
|
data,
|
||||||
default=self.DEFAULT_PALETTE_TYPE)
|
offsetToPaletteTypeArray,
|
||||||
|
numPalettes,
|
||||||
|
default=self.DEFAULT_PALETTE_TYPE,
|
||||||
|
)
|
||||||
self.paletteLabels = self._decompileUInt16Array(
|
self.paletteLabels = self._decompileUInt16Array(
|
||||||
data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID)
|
data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
|
||||||
|
)
|
||||||
self.paletteEntryLabels = self._decompileUInt16Array(
|
self.paletteEntryLabels = self._decompileUInt16Array(
|
||||||
data, offsetToPaletteEntryLabelArray,
|
data,
|
||||||
self.numPaletteEntries, default=self.NO_NAME_ID)
|
offsetToPaletteEntryLabelArray,
|
||||||
|
self.numPaletteEntries,
|
||||||
|
default=self.NO_NAME_ID,
|
||||||
|
)
|
||||||
|
|
||||||
def _decompileUInt16Array(self, data, offset, numElements, default=0):
|
def _decompileUInt16Array(self, data, offset, numElements, default=0):
|
||||||
if offset == 0:
|
if offset == 0:
|
||||||
return [default] * numElements
|
return [default] * numElements
|
||||||
result = array.array("H", data[offset : offset + 2 * numElements])
|
result = array.array("H", data[offset : offset + 2 * numElements])
|
||||||
if sys.byteorder != "big": result.byteswap()
|
if sys.byteorder != "big":
|
||||||
|
result.byteswap()
|
||||||
assert len(result) == numElements, result
|
assert len(result) == numElements, result
|
||||||
return result.tolist()
|
return result.tolist()
|
||||||
|
|
||||||
@ -67,7 +85,8 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
if offset == 0:
|
if offset == 0:
|
||||||
return [default] * numElements
|
return [default] * numElements
|
||||||
result = array.array("I", data[offset : offset + 4 * numElements])
|
result = array.array("I", data[offset : offset + 4 * numElements])
|
||||||
if sys.byteorder != "big": result.byteswap()
|
if sys.byteorder != "big":
|
||||||
|
result.byteswap()
|
||||||
assert len(result) == numElements, result
|
assert len(result) == numElements, result
|
||||||
return result.tolist()
|
return result.tolist()
|
||||||
|
|
||||||
@ -80,9 +99,14 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
offsetToFirstColorRecord = 12 + len(colorRecordIndices)
|
offsetToFirstColorRecord = 12 + len(colorRecordIndices)
|
||||||
if self.version >= 1:
|
if self.version >= 1:
|
||||||
offsetToFirstColorRecord += 12
|
offsetToFirstColorRecord += 12
|
||||||
header = struct.pack(">HHHHL", self.version,
|
header = struct.pack(
|
||||||
self.numPaletteEntries, len(self.palettes),
|
">HHHHL",
|
||||||
numColorRecords, offsetToFirstColorRecord)
|
self.version,
|
||||||
|
self.numPaletteEntries,
|
||||||
|
len(self.palettes),
|
||||||
|
numColorRecords,
|
||||||
|
offsetToFirstColorRecord,
|
||||||
|
)
|
||||||
if self.version == 0:
|
if self.version == 0:
|
||||||
dataList = [header, colorRecordIndices, colorRecords]
|
dataList = [header, colorRecordIndices, colorRecords]
|
||||||
else:
|
else:
|
||||||
@ -102,17 +126,25 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
else:
|
else:
|
||||||
offsetToPaletteEntryLabelArray = pos
|
offsetToPaletteEntryLabelArray = pos
|
||||||
pos += len(paletteLabels)
|
pos += len(paletteLabels)
|
||||||
header1 = struct.pack(">LLL",
|
header1 = struct.pack(
|
||||||
|
">LLL",
|
||||||
offsetToPaletteTypeArray,
|
offsetToPaletteTypeArray,
|
||||||
offsetToPaletteLabelArray,
|
offsetToPaletteLabelArray,
|
||||||
offsetToPaletteEntryLabelArray)
|
offsetToPaletteEntryLabelArray,
|
||||||
dataList = [header, colorRecordIndices, header1,
|
)
|
||||||
colorRecords, paletteTypes, paletteLabels,
|
dataList = [
|
||||||
paletteEntryLabels]
|
header,
|
||||||
|
colorRecordIndices,
|
||||||
|
header1,
|
||||||
|
colorRecords,
|
||||||
|
paletteTypes,
|
||||||
|
paletteLabels,
|
||||||
|
paletteEntryLabels,
|
||||||
|
]
|
||||||
return bytesjoin(dataList)
|
return bytesjoin(dataList)
|
||||||
|
|
||||||
def _compilePalette(self, palette):
|
def _compilePalette(self, palette):
|
||||||
assert(len(palette) == self.numPaletteEntries)
|
assert len(palette) == self.numPaletteEntries
|
||||||
pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
|
pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
|
||||||
return bytesjoin([pack(color) for color in palette])
|
return bytesjoin([pack(color) for color in palette])
|
||||||
|
|
||||||
@ -131,40 +163,39 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
|
|
||||||
def _compilePaletteTypes(self):
|
def _compilePaletteTypes(self):
|
||||||
if self.version == 0 or not any(self.paletteTypes):
|
if self.version == 0 or not any(self.paletteTypes):
|
||||||
return b''
|
return b""
|
||||||
assert len(self.paletteTypes) == len(self.palettes)
|
assert len(self.paletteTypes) == len(self.palettes)
|
||||||
result = bytesjoin([struct.pack(">I", ptype)
|
result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
|
||||||
for ptype in self.paletteTypes])
|
|
||||||
assert len(result) == 4 * len(self.palettes)
|
assert len(result) == 4 * len(self.palettes)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _compilePaletteLabels(self):
|
def _compilePaletteLabels(self):
|
||||||
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
|
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
|
||||||
return b''
|
return b""
|
||||||
assert len(self.paletteLabels) == len(self.palettes)
|
assert len(self.paletteLabels) == len(self.palettes)
|
||||||
result = bytesjoin([struct.pack(">H", label)
|
result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
|
||||||
for label in self.paletteLabels])
|
|
||||||
assert len(result) == 2 * len(self.palettes)
|
assert len(result) == 2 * len(self.palettes)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _compilePaletteEntryLabels(self):
|
def _compilePaletteEntryLabels(self):
|
||||||
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
|
if self.version == 0 or all(
|
||||||
return b''
|
l == self.NO_NAME_ID for l in self.paletteEntryLabels
|
||||||
|
):
|
||||||
|
return b""
|
||||||
assert len(self.paletteEntryLabels) == self.numPaletteEntries
|
assert len(self.paletteEntryLabels) == self.numPaletteEntries
|
||||||
result = bytesjoin([struct.pack(">H", label)
|
result = bytesjoin(
|
||||||
for label in self.paletteEntryLabels])
|
[struct.pack(">H", label) for label in self.paletteEntryLabels]
|
||||||
|
)
|
||||||
assert len(result) == 2 * self.numPaletteEntries
|
assert len(result) == 2 * self.numPaletteEntries
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def toXML(self, writer, ttFont):
|
def toXML(self, writer, ttFont):
|
||||||
numPalettes = len(self.palettes)
|
numPalettes = len(self.palettes)
|
||||||
paletteLabels = {i: nameID
|
paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
|
||||||
for (i, nameID) in enumerate(self.paletteLabels)}
|
|
||||||
paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
|
paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
|
||||||
writer.simpletag("version", value=self.version)
|
writer.simpletag("version", value=self.version)
|
||||||
writer.newline()
|
writer.newline()
|
||||||
writer.simpletag("numPaletteEntries",
|
writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
|
||||||
value=self.numPaletteEntries)
|
|
||||||
writer.newline()
|
writer.newline()
|
||||||
for index, palette in enumerate(self.palettes):
|
for index, palette in enumerate(self.palettes):
|
||||||
attrs = {"index": index}
|
attrs = {"index": index}
|
||||||
@ -176,24 +207,30 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
attrs["type"] = paletteType
|
attrs["type"] = paletteType
|
||||||
writer.begintag("palette", **attrs)
|
writer.begintag("palette", **attrs)
|
||||||
writer.newline()
|
writer.newline()
|
||||||
if (self.version > 0 and paletteLabel != self.NO_NAME_ID and
|
if (
|
||||||
ttFont and "name" in ttFont):
|
self.version > 0
|
||||||
|
and paletteLabel != self.NO_NAME_ID
|
||||||
|
and ttFont
|
||||||
|
and "name" in ttFont
|
||||||
|
):
|
||||||
name = ttFont["name"].getDebugName(paletteLabel)
|
name = ttFont["name"].getDebugName(paletteLabel)
|
||||||
if name is not None:
|
if name is not None:
|
||||||
writer.comment(name)
|
writer.comment(name)
|
||||||
writer.newline()
|
writer.newline()
|
||||||
assert(len(palette) == self.numPaletteEntries)
|
assert len(palette) == self.numPaletteEntries
|
||||||
for cindex, color in enumerate(palette):
|
for cindex, color in enumerate(palette):
|
||||||
color.toXML(writer, ttFont, cindex)
|
color.toXML(writer, ttFont, cindex)
|
||||||
writer.endtag("palette")
|
writer.endtag("palette")
|
||||||
writer.newline()
|
writer.newline()
|
||||||
if self.version > 0 and not all(l == self.NO_NAME_ID for l in self.paletteEntryLabels):
|
if self.version > 0 and not all(
|
||||||
|
l == self.NO_NAME_ID for l in self.paletteEntryLabels
|
||||||
|
):
|
||||||
writer.begintag("paletteEntryLabels")
|
writer.begintag("paletteEntryLabels")
|
||||||
writer.newline()
|
writer.newline()
|
||||||
for index, label in enumerate(self.paletteEntryLabels):
|
for index, label in enumerate(self.paletteEntryLabels):
|
||||||
if label != self.NO_NAME_ID:
|
if label != self.NO_NAME_ID:
|
||||||
writer.simpletag("label", index=index, value=label)
|
writer.simpletag("label", index=index, value=label)
|
||||||
if (self.version > 0 and label and ttFont and "name" in ttFont):
|
if self.version > 0 and label and ttFont and "name" in ttFont:
|
||||||
name = ttFont["name"].getDebugName(label)
|
name = ttFont["name"].getDebugName(label)
|
||||||
if name is not None:
|
if name is not None:
|
||||||
writer.comment(name)
|
writer.comment(name)
|
||||||
@ -225,7 +262,8 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
colorLabels[labelIndex] = nameID
|
colorLabels[labelIndex] = nameID
|
||||||
self.paletteEntryLabels = [
|
self.paletteEntryLabels = [
|
||||||
colorLabels.get(i, self.NO_NAME_ID)
|
colorLabels.get(i, self.NO_NAME_ID)
|
||||||
for i in range(self.numPaletteEntries)]
|
for i in range(self.numPaletteEntries)
|
||||||
|
]
|
||||||
elif "value" in attrs:
|
elif "value" in attrs:
|
||||||
value = safeEval(attrs["value"])
|
value = safeEval(attrs["value"])
|
||||||
setattr(self, name, value)
|
setattr(self, name, value)
|
||||||
@ -234,7 +272,6 @@ class table_C_P_A_L_(DefaultTable.DefaultTable):
|
|||||||
|
|
||||||
|
|
||||||
class Color(namedtuple("Color", "blue green red alpha")):
|
class Color(namedtuple("Color", "blue green red alpha")):
|
||||||
|
|
||||||
def hex(self):
|
def hex(self):
|
||||||
return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
|
return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
|
||||||
|
|
||||||
@ -247,7 +284,7 @@ class Color(namedtuple("Color", "blue green red alpha")):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def fromHex(cls, value):
|
def fromHex(cls, value):
|
||||||
if value[0] == '#':
|
if value[0] == "#":
|
||||||
value = value[1:]
|
value = value[1:]
|
||||||
red = int(value[0:2], 16)
|
red = int(value[0:2], 16)
|
||||||
green = int(value[2:4], 16)
|
green = int(value[2:4], 16)
|
||||||
|
@ -37,21 +37,31 @@ DSIG_SignatureBlockFormat = """
|
|||||||
# on compilation with no padding whatsoever.
|
# on compilation with no padding whatsoever.
|
||||||
#
|
#
|
||||||
|
|
||||||
class table_D_S_I_G_(DefaultTable.DefaultTable):
|
|
||||||
|
|
||||||
|
class table_D_S_I_G_(DefaultTable.DefaultTable):
|
||||||
def decompile(self, data, ttFont):
|
def decompile(self, data, ttFont):
|
||||||
dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
|
dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
|
||||||
assert self.ulVersion == 1, "DSIG ulVersion must be 1"
|
assert self.ulVersion == 1, "DSIG ulVersion must be 1"
|
||||||
assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
|
assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
|
||||||
self.signatureRecords = sigrecs = []
|
self.signatureRecords = sigrecs = []
|
||||||
for n in range(self.usNumSigs):
|
for n in range(self.usNumSigs):
|
||||||
sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
|
sigrec, newData = sstruct.unpack2(
|
||||||
assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
|
DSIG_SignatureFormat, newData, SignatureRecord()
|
||||||
|
)
|
||||||
|
assert sigrec.ulFormat == 1, (
|
||||||
|
"DSIG signature record #%d ulFormat must be 1" % n
|
||||||
|
)
|
||||||
sigrecs.append(sigrec)
|
sigrecs.append(sigrec)
|
||||||
for sigrec in sigrecs:
|
for sigrec in sigrecs:
|
||||||
dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
|
dummy, newData = sstruct.unpack2(
|
||||||
assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
|
DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
|
||||||
assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
|
)
|
||||||
|
assert sigrec.usReserved1 == 0, (
|
||||||
|
"DSIG signature record #%d usReserverd1 must be 0" % n
|
||||||
|
)
|
||||||
|
assert sigrec.usReserved2 == 0, (
|
||||||
|
"DSIG signature record #%d usReserverd2 must be 0" % n
|
||||||
|
)
|
||||||
sigrec.pkcs7 = newData[: sigrec.cbSignature]
|
sigrec.pkcs7 = newData[: sigrec.cbSignature]
|
||||||
|
|
||||||
def compile(self, ttFont):
|
def compile(self, ttFont):
|
||||||
@ -72,13 +82,20 @@ class table_D_S_I_G_(DefaultTable.DefaultTable):
|
|||||||
offset += sigrec.ulLength
|
offset += sigrec.ulLength
|
||||||
if offset % 2:
|
if offset % 2:
|
||||||
# Pad to even bytes
|
# Pad to even bytes
|
||||||
data.append(b'\0')
|
data.append(b"\0")
|
||||||
return bytesjoin(headers + data)
|
return bytesjoin(headers + data)
|
||||||
|
|
||||||
def toXML(self, xmlWriter, ttFont):
|
def toXML(self, xmlWriter, ttFont):
|
||||||
xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
|
xmlWriter.comment(
|
||||||
|
"note that the Digital Signature will be invalid after recompilation!"
|
||||||
|
)
|
||||||
xmlWriter.newline()
|
xmlWriter.newline()
|
||||||
xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
|
xmlWriter.simpletag(
|
||||||
|
"tableHeader",
|
||||||
|
version=self.ulVersion,
|
||||||
|
numSigs=self.usNumSigs,
|
||||||
|
flag="0x%X" % self.usFlag,
|
||||||
|
)
|
||||||
for sigrec in self.signatureRecords:
|
for sigrec in self.signatureRecords:
|
||||||
xmlWriter.newline()
|
xmlWriter.newline()
|
||||||
sigrec.toXML(xmlWriter, ttFont)
|
sigrec.toXML(xmlWriter, ttFont)
|
||||||
@ -96,20 +113,25 @@ class table_D_S_I_G_(DefaultTable.DefaultTable):
|
|||||||
sigrec.fromXML(name, attrs, content, ttFont)
|
sigrec.fromXML(name, attrs, content, ttFont)
|
||||||
self.signatureRecords.append(sigrec)
|
self.signatureRecords.append(sigrec)
|
||||||
|
|
||||||
|
|
||||||
pem_spam = lambda l, spam={
|
pem_spam = lambda l, spam={
|
||||||
"-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
|
"-----BEGIN PKCS7-----": True,
|
||||||
|
"-----END PKCS7-----": True,
|
||||||
|
"": True,
|
||||||
}: not spam.get(l.strip())
|
}: not spam.get(l.strip())
|
||||||
|
|
||||||
|
|
||||||
def b64encode(b):
|
def b64encode(b):
|
||||||
s = base64.b64encode(b)
|
s = base64.b64encode(b)
|
||||||
# Line-break at 76 chars.
|
# Line-break at 76 chars.
|
||||||
items = []
|
items = []
|
||||||
while s:
|
while s:
|
||||||
items.append(tostr(s[:76]))
|
items.append(tostr(s[:76]))
|
||||||
items.append('\n')
|
items.append("\n")
|
||||||
s = s[76:]
|
s = s[76:]
|
||||||
return strjoin(items)
|
return strjoin(items)
|
||||||
|
|
||||||
|
|
||||||
 class SignatureRecord(object):
     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, self.__dict__)