Blacken code

This commit is contained in:
Nikolaus Waxweiler 2022-12-13 11:26:36 +00:00
parent 698d8fb387
commit d584daa8fd
359 changed files with 80867 additions and 67305 deletions

View File

@ -30,14 +30,17 @@ needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage", "sphinx.ext.autosectionlabel"]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.coverage",
"sphinx.ext.autosectionlabel",
]
autodoc_mock_imports = ["gtk", "reportlab"]
autodoc_default_options = {
'members': True,
'inherited-members': True
}
autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@ -52,9 +55,11 @@ source_suffix = ".rst"
master_doc = "index"
# General information about the project.
project = u"fontTools"
copyright = u"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
author = u"Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
project = "fontTools"
copyright = (
"2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
)
author = "Just van Rossum, Behdad Esfahbod, and the fontTools Authors"
# HTML page title
html_title = "fontTools Documentation"
@ -64,9 +69,9 @@ html_title = "fontTools Documentation"
# built documents.
#
# The short X.Y version.
version = u"4.0"
version = "4.0"
# The full version, including alpha/beta/rc tags.
release = u"4.0"
release = "4.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -142,8 +147,8 @@ latex_documents = [
(
master_doc,
"fontTools.tex",
u"fontTools Documentation",
u"Just van Rossum, Behdad Esfahbod et al.",
"fontTools Documentation",
"Just van Rossum, Behdad Esfahbod et al.",
"manual",
)
]
@ -153,7 +158,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "fonttools", u"fontTools Documentation", [author], 1)]
man_pages = [(master_doc, "fonttools", "fontTools Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
@ -165,7 +170,7 @@ texinfo_documents = [
(
master_doc,
"fontTools",
u"fontTools Documentation",
"fontTools Documentation",
author,
"fontTools",
"A library for manipulating fonts, written in Python.",

View File

@ -2,33 +2,34 @@ import sys
def main(args=None):
if args is None:
args = sys.argv[1:]
if args is None:
args = sys.argv[1:]
# TODO Handle library-wide options. Eg.:
# --unicodedata
# --verbose / other logging stuff
# TODO Handle library-wide options. Eg.:
# --unicodedata
# --verbose / other logging stuff
# TODO Allow a way to run arbitrary modules? Useful for setting
# library-wide options and calling another library. Eg.:
#
# $ fonttools --unicodedata=... fontmake ...
#
# This allows for a git-like command where thirdparty commands
# can be added. Should we just try importing the fonttools
# module first and try without if it fails?
# TODO Allow a way to run arbitrary modules? Useful for setting
# library-wide options and calling another library. Eg.:
#
# $ fonttools --unicodedata=... fontmake ...
#
# This allows for a git-like command where thirdparty commands
# can be added. Should we just try importing the fonttools
# module first and try without if it fails?
if len(sys.argv) < 2:
sys.argv.append("help")
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
sys.argv[1] = "help"
mod = 'fontTools.'+sys.argv[1]
sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1]
del sys.argv[0]
if len(sys.argv) < 2:
sys.argv.append("help")
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
sys.argv[1] = "help"
mod = "fontTools." + sys.argv[1]
sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
del sys.argv[0]
import runpy
runpy.run_module(mod, run_name='__main__')
import runpy
runpy.run_module(mod, run_name="__main__")
if __name__ == '__main__':
sys.exit(main())
if __name__ == "__main__":
sys.exit(main())

View File

@ -53,378 +53,386 @@ identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
r"(-?\d+)" # charnum
r"\s*;\s*WX\s+" # ; WX
r"(-?\d+)" # width
r"\s*;\s*N\s+" # ; N
r"([.A-Za-z0-9_]+)" # charname
r"\s*;\s*B\s+" # ; B
r"(-?\d+)" # left
r"\s+"
r"(-?\d+)" # bottom
r"\s+"
r"(-?\d+)" # right
r"\s+"
r"(-?\d+)" # top
r"\s*;\s*" # ;
)
r"(-?\d+)" # charnum
r"\s*;\s*WX\s+" # ; WX
r"(-?\d+)" # width
r"\s*;\s*N\s+" # ; N
r"([.A-Za-z0-9_]+)" # charname
r"\s*;\s*B\s+" # ; B
r"(-?\d+)" # left
r"\s+"
r"(-?\d+)" # bottom
r"\s+"
r"(-?\d+)" # right
r"\s+"
r"(-?\d+)" # top
r"\s*;\s*" # ;
)
# regular expression to parse kerning lines
kernRE = re.compile(
r"([.A-Za-z0-9_]+)" # leftchar
r"\s+"
r"([.A-Za-z0-9_]+)" # rightchar
r"\s+"
r"(-?\d+)" # value
r"\s*"
)
r"([.A-Za-z0-9_]+)" # leftchar
r"\s+"
r"([.A-Za-z0-9_]+)" # rightchar
r"\s+"
r"(-?\d+)" # value
r"\s*"
)
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
r"([.A-Za-z0-9_]+)" # char name
r"\s+"
r"(\d+)" # number of parts
r"\s*;\s*"
)
r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*" # char name # number of parts
)
componentRE = re.compile(
r"PCC\s+" # PPC
r"([.A-Za-z0-9_]+)" # base char name
r"\s+"
r"(-?\d+)" # x offset
r"\s+"
r"(-?\d+)" # y offset
r"\s*;\s*"
)
r"PCC\s+" # PPC
r"([.A-Za-z0-9_]+)" # base char name
r"\s+"
r"(-?\d+)" # x offset
r"\s+"
r"(-?\d+)" # y offset
r"\s*;\s*"
)
preferredAttributeOrder = [
"FontName",
"FullName",
"FamilyName",
"Weight",
"ItalicAngle",
"IsFixedPitch",
"FontBBox",
"UnderlinePosition",
"UnderlineThickness",
"Version",
"Notice",
"EncodingScheme",
"CapHeight",
"XHeight",
"Ascender",
"Descender",
"FontName",
"FullName",
"FamilyName",
"Weight",
"ItalicAngle",
"IsFixedPitch",
"FontBBox",
"UnderlinePosition",
"UnderlineThickness",
"Version",
"Notice",
"EncodingScheme",
"CapHeight",
"XHeight",
"Ascender",
"Descender",
]
class error(Exception):
pass
pass
class AFM(object):
_attrs = None
_attrs = None
_keywords = ['StartFontMetrics',
'EndFontMetrics',
'StartCharMetrics',
'EndCharMetrics',
'StartKernData',
'StartKernPairs',
'EndKernPairs',
'EndKernData',
'StartComposites',
'EndComposites',
]
_keywords = [
"StartFontMetrics",
"EndFontMetrics",
"StartCharMetrics",
"EndCharMetrics",
"StartKernData",
"StartKernPairs",
"EndKernPairs",
"EndKernData",
"StartComposites",
"EndComposites",
]
def __init__(self, path=None):
"""AFM file reader.
def __init__(self, path=None):
"""AFM file reader.
Instantiating an object with a path name will cause the file to be opened,
read, and parsed. Alternatively the path can be left unspecified, and a
file can be parsed later with the :meth:`read` method."""
self._attrs = {}
self._chars = {}
self._kerning = {}
self._index = {}
self._comments = []
self._composites = {}
if path is not None:
self.read(path)
Instantiating an object with a path name will cause the file to be opened,
read, and parsed. Alternatively the path can be left unspecified, and a
file can be parsed later with the :meth:`read` method."""
self._attrs = {}
self._chars = {}
self._kerning = {}
self._index = {}
self._comments = []
self._composites = {}
if path is not None:
self.read(path)
def read(self, path):
"""Opens, reads and parses a file."""
lines = readlines(path)
for line in lines:
if not line.strip():
continue
m = identifierRE.match(line)
if m is None:
raise error("syntax error in AFM file: " + repr(line))
def read(self, path):
"""Opens, reads and parses a file."""
lines = readlines(path)
for line in lines:
if not line.strip():
continue
m = identifierRE.match(line)
if m is None:
raise error("syntax error in AFM file: " + repr(line))
pos = m.regs[1][1]
word = line[:pos]
rest = line[pos:].strip()
if word in self._keywords:
continue
if word == "C":
self.parsechar(rest)
elif word == "KPX":
self.parsekernpair(rest)
elif word == "CC":
self.parsecomposite(rest)
else:
self.parseattr(word, rest)
pos = m.regs[1][1]
word = line[:pos]
rest = line[pos:].strip()
if word in self._keywords:
continue
if word == "C":
self.parsechar(rest)
elif word == "KPX":
self.parsekernpair(rest)
elif word == "CC":
self.parsecomposite(rest)
else:
self.parseattr(word, rest)
def parsechar(self, rest):
m = charRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
charname = things[2]
del things[2]
charnum, width, l, b, r, t = (int(thing) for thing in things)
self._chars[charname] = charnum, width, (l, b, r, t)
def parsechar(self, rest):
m = charRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
charname = things[2]
del things[2]
charnum, width, l, b, r, t = (int(thing) for thing in things)
self._chars[charname] = charnum, width, (l, b, r, t)
def parsekernpair(self, rest):
m = kernRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
leftchar, rightchar, value = things
value = int(value)
self._kerning[(leftchar, rightchar)] = value
def parsekernpair(self, rest):
m = kernRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
leftchar, rightchar, value = things
value = int(value)
self._kerning[(leftchar, rightchar)] = value
def parseattr(self, word, rest):
if word == "FontBBox":
l, b, r, t = [int(thing) for thing in rest.split()]
self._attrs[word] = l, b, r, t
elif word == "Comment":
self._comments.append(rest)
else:
try:
value = int(rest)
except (ValueError, OverflowError):
self._attrs[word] = rest
else:
self._attrs[word] = value
def parseattr(self, word, rest):
if word == "FontBBox":
l, b, r, t = [int(thing) for thing in rest.split()]
self._attrs[word] = l, b, r, t
elif word == "Comment":
self._comments.append(rest)
else:
try:
value = int(rest)
except (ValueError, OverflowError):
self._attrs[word] = rest
else:
self._attrs[word] = value
def parsecomposite(self, rest):
m = compositeRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
charname = m.group(1)
ncomponents = int(m.group(2))
rest = rest[m.regs[0][1]:]
components = []
while True:
m = componentRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
basechar = m.group(1)
xoffset = int(m.group(2))
yoffset = int(m.group(3))
components.append((basechar, xoffset, yoffset))
rest = rest[m.regs[0][1]:]
if not rest:
break
assert len(components) == ncomponents
self._composites[charname] = components
def parsecomposite(self, rest):
m = compositeRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
charname = m.group(1)
ncomponents = int(m.group(2))
rest = rest[m.regs[0][1] :]
components = []
while True:
m = componentRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
basechar = m.group(1)
xoffset = int(m.group(2))
yoffset = int(m.group(3))
components.append((basechar, xoffset, yoffset))
rest = rest[m.regs[0][1] :]
if not rest:
break
assert len(components) == ncomponents
self._composites[charname] = components
def write(self, path, sep='\r'):
"""Writes out an AFM font to the given path."""
import time
lines = [ "StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s" % (
time.strftime("%m/%d/%Y %H:%M:%S",
time.localtime(time.time())))]
def write(self, path, sep="\r"):
"""Writes out an AFM font to the given path."""
import time
# write comments, assuming (possibly wrongly!) they should
# all appear at the top
for comment in self._comments:
lines.append("Comment " + comment)
lines = [
"StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s"
% (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
]
# write attributes, first the ones we know about, in
# a preferred order
attrs = self._attrs
for attr in preferredAttributeOrder:
if attr in attrs:
value = attrs[attr]
if attr == "FontBBox":
value = "%s %s %s %s" % value
lines.append(attr + " " + str(value))
# then write the attributes we don't know about,
# in alphabetical order
items = sorted(attrs.items())
for attr, value in items:
if attr in preferredAttributeOrder:
continue
lines.append(attr + " " + str(value))
# write comments, assuming (possibly wrongly!) they should
# all appear at the top
for comment in self._comments:
lines.append("Comment " + comment)
# write char metrics
lines.append("StartCharMetrics " + repr(len(self._chars)))
items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()]
# write attributes, first the ones we know about, in
# a preferred order
attrs = self._attrs
for attr in preferredAttributeOrder:
if attr in attrs:
value = attrs[attr]
if attr == "FontBBox":
value = "%s %s %s %s" % value
lines.append(attr + " " + str(value))
# then write the attributes we don't know about,
# in alphabetical order
items = sorted(attrs.items())
for attr, value in items:
if attr in preferredAttributeOrder:
continue
lines.append(attr + " " + str(value))
def myKey(a):
"""Custom key function to make sure unencoded chars (-1)
end up at the end of the list after sorting."""
if a[0] == -1:
a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number
return a
items.sort(key=myKey)
# write char metrics
lines.append("StartCharMetrics " + repr(len(self._chars)))
items = [
(charnum, (charname, width, box))
for charname, (charnum, width, box) in self._chars.items()
]
for charnum, (charname, width, (l, b, r, t)) in items:
lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
(charnum, width, charname, l, b, r, t))
lines.append("EndCharMetrics")
def myKey(a):
"""Custom key function to make sure unencoded chars (-1)
end up at the end of the list after sorting."""
if a[0] == -1:
a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number
return a
# write kerning info
lines.append("StartKernData")
lines.append("StartKernPairs " + repr(len(self._kerning)))
items = sorted(self._kerning.items())
for (leftchar, rightchar), value in items:
lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
lines.append("EndKernPairs")
lines.append("EndKernData")
items.sort(key=myKey)
if self._composites:
composites = sorted(self._composites.items())
lines.append("StartComposites %s" % len(self._composites))
for charname, components in composites:
line = "CC %s %s ;" % (charname, len(components))
for basechar, xoffset, yoffset in components:
line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
lines.append(line)
lines.append("EndComposites")
for charnum, (charname, width, (l, b, r, t)) in items:
lines.append(
"C %d ; WX %d ; N %s ; B %d %d %d %d ;"
% (charnum, width, charname, l, b, r, t)
)
lines.append("EndCharMetrics")
lines.append("EndFontMetrics")
# write kerning info
lines.append("StartKernData")
lines.append("StartKernPairs " + repr(len(self._kerning)))
items = sorted(self._kerning.items())
for (leftchar, rightchar), value in items:
lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
lines.append("EndKernPairs")
lines.append("EndKernData")
writelines(path, lines, sep)
if self._composites:
composites = sorted(self._composites.items())
lines.append("StartComposites %s" % len(self._composites))
for charname, components in composites:
line = "CC %s %s ;" % (charname, len(components))
for basechar, xoffset, yoffset in components:
line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
lines.append(line)
lines.append("EndComposites")
def has_kernpair(self, pair):
"""Returns `True` if the given glyph pair (specified as a tuple) exists
in the kerning dictionary."""
return pair in self._kerning
lines.append("EndFontMetrics")
def kernpairs(self):
"""Returns a list of all kern pairs in the kerning dictionary."""
return list(self._kerning.keys())
writelines(path, lines, sep)
def has_char(self, char):
"""Returns `True` if the given glyph exists in the font."""
return char in self._chars
def has_kernpair(self, pair):
"""Returns `True` if the given glyph pair (specified as a tuple) exists
in the kerning dictionary."""
return pair in self._kerning
def chars(self):
"""Returns a list of all glyph names in the font."""
return list(self._chars.keys())
def kernpairs(self):
"""Returns a list of all kern pairs in the kerning dictionary."""
return list(self._kerning.keys())
def comments(self):
"""Returns all comments from the file."""
return self._comments
def has_char(self, char):
"""Returns `True` if the given glyph exists in the font."""
return char in self._chars
def addComment(self, comment):
"""Adds a new comment to the file."""
self._comments.append(comment)
def chars(self):
"""Returns a list of all glyph names in the font."""
return list(self._chars.keys())
def addComposite(self, glyphName, components):
"""Specifies that the glyph `glyphName` is made up of the given components.
The components list should be of the following form::
def comments(self):
"""Returns all comments from the file."""
return self._comments
[
(glyphname, xOffset, yOffset),
...
]
"""
self._composites[glyphName] = components
def addComment(self, comment):
"""Adds a new comment to the file."""
self._comments.append(comment)
def __getattr__(self, attr):
if attr in self._attrs:
return self._attrs[attr]
else:
raise AttributeError(attr)
def addComposite(self, glyphName, components):
"""Specifies that the glyph `glyphName` is made up of the given components.
The components list should be of the following form::
def __setattr__(self, attr, value):
# all attrs *not* starting with "_" are consider to be AFM keywords
if attr[:1] == "_":
self.__dict__[attr] = value
else:
self._attrs[attr] = value
[
(glyphname, xOffset, yOffset),
...
]
def __delattr__(self, attr):
# all attrs *not* starting with "_" are consider to be AFM keywords
if attr[:1] == "_":
try:
del self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
else:
try:
del self._attrs[attr]
except KeyError:
raise AttributeError(attr)
"""
self._composites[glyphName] = components
def __getitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, return the kernpair
return self._kerning[key]
else:
# return the metrics instead
return self._chars[key]
def __getattr__(self, attr):
if attr in self._attrs:
return self._attrs[attr]
else:
raise AttributeError(attr)
def __setitem__(self, key, value):
if isinstance(key, tuple):
# key is a tuple, set kernpair
self._kerning[key] = value
else:
# set char metrics
self._chars[key] = value
def __setattr__(self, attr, value):
# all attrs *not* starting with "_" are consider to be AFM keywords
if attr[:1] == "_":
self.__dict__[attr] = value
else:
self._attrs[attr] = value
def __delitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, del kernpair
del self._kerning[key]
else:
# del char metrics
del self._chars[key]
def __delattr__(self, attr):
# all attrs *not* starting with "_" are consider to be AFM keywords
if attr[:1] == "_":
try:
del self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
else:
try:
del self._attrs[attr]
except KeyError:
raise AttributeError(attr)
def __repr__(self):
if hasattr(self, "FullName"):
return '<AFM object for %s>' % self.FullName
else:
return '<AFM object at %x>' % id(self)
def __getitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, return the kernpair
return self._kerning[key]
else:
# return the metrics instead
return self._chars[key]
def __setitem__(self, key, value):
if isinstance(key, tuple):
# key is a tuple, set kernpair
self._kerning[key] = value
else:
# set char metrics
self._chars[key] = value
def __delitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, del kernpair
del self._kerning[key]
else:
# del char metrics
del self._chars[key]
def __repr__(self):
if hasattr(self, "FullName"):
return "<AFM object for %s>" % self.FullName
else:
return "<AFM object at %x>" % id(self)
def readlines(path):
with open(path, "r", encoding="ascii") as f:
data = f.read()
return data.splitlines()
with open(path, "r", encoding="ascii") as f:
data = f.read()
return data.splitlines()
def writelines(path, lines, sep='\r'):
with open(path, "w", encoding="ascii", newline=sep) as f:
f.write("\n".join(lines) + "\n")
def writelines(path, lines, sep="\r"):
with open(path, "w", encoding="ascii", newline=sep) as f:
f.write("\n".join(lines) + "\n")
if __name__ == "__main__":
import EasyDialogs
path = EasyDialogs.AskFileForOpen()
if path:
afm = AFM(path)
char = 'A'
if afm.has_char(char):
print(afm[char]) # print charnum, width and boundingbox
pair = ('A', 'V')
if afm.has_kernpair(pair):
print(afm[pair]) # print kerning value for pair
print(afm.Version) # various other afm entries have become attributes
print(afm.Weight)
# afm.comments() returns a list of all Comment lines found in the AFM
print(afm.comments())
#print afm.chars()
#print afm.kernpairs()
print(afm)
afm.write(path + ".muck")
import EasyDialogs
path = EasyDialogs.AskFileForOpen()
if path:
afm = AFM(path)
char = "A"
if afm.has_char(char):
print(afm[char]) # print charnum, width and boundingbox
pair = ("A", "V")
if afm.has_kernpair(pair):
print(afm[pair]) # print kerning value for pair
print(afm.Version) # various other afm entries have become attributes
print(afm.Weight)
# afm.comments() returns a list of all Comment lines found in the AFM
print(afm.comments())
# print afm.chars()
# print afm.kernpairs()
print(afm)
afm.write(path + ".muck")

View File

@ -5059,174 +5059,175 @@ _aglfnText = """\
class AGLError(Exception):
pass
pass
LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}
def _builddicts():
import re
import re
lines = _aglText.splitlines()
lines = _aglText.splitlines()
parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
for line in lines:
if not line or line[:1] == '#':
continue
m = parseAGL_RE.match(line)
if not m:
raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
unicodes = m.group(2)
assert len(unicodes) % 5 == 4
unicodes = [int(unicode, 16) for unicode in unicodes.split()]
glyphName = tostr(m.group(1))
LEGACY_AGL2UV[glyphName] = unicodes
for line in lines:
if not line or line[:1] == "#":
continue
m = parseAGL_RE.match(line)
if not m:
raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
unicodes = m.group(2)
assert len(unicodes) % 5 == 4
unicodes = [int(unicode, 16) for unicode in unicodes.split()]
glyphName = tostr(m.group(1))
LEGACY_AGL2UV[glyphName] = unicodes
lines = _aglfnText.splitlines()
lines = _aglfnText.splitlines()
parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
for line in lines:
if not line or line[:1] == "#":
continue
m = parseAGLFN_RE.match(line)
if not m:
raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
unicode = m.group(1)
assert len(unicode) == 4
unicode = int(unicode, 16)
glyphName = tostr(m.group(2))
AGL2UV[glyphName] = unicode
UV2AGL[unicode] = glyphName
for line in lines:
if not line or line[:1] == '#':
continue
m = parseAGLFN_RE.match(line)
if not m:
raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
unicode = m.group(1)
assert len(unicode) == 4
unicode = int(unicode, 16)
glyphName = tostr(m.group(2))
AGL2UV[glyphName] = unicode
UV2AGL[unicode] = glyphName
_builddicts()
def toUnicode(glyph, isZapfDingbats=False):
"""Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``
"""Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``
If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
glyph names (as required by the AGL specification).
"""
# https://github.com/adobe-type-tools/agl-specification#2-the-mapping
#
# 1. Drop all the characters from the glyph name starting with
# the first occurrence of a period (U+002E; FULL STOP), if any.
glyph = glyph.split(".", 1)[0]
If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
glyph names (as required by the AGL specification).
"""
# https://github.com/adobe-type-tools/agl-specification#2-the-mapping
#
# 1. Drop all the characters from the glyph name starting with
# the first occurrence of a period (U+002E; FULL STOP), if any.
glyph = glyph.split(".", 1)[0]
# 2. Split the remaining string into a sequence of components,
# using underscore (U+005F; LOW LINE) as the delimiter.
components = glyph.split("_")
# 2. Split the remaining string into a sequence of components,
# using underscore (U+005F; LOW LINE) as the delimiter.
components = glyph.split("_")
# 3. Map each component to a character string according to the
# procedure below, and concatenate those strings; the result
# is the character string to which the glyph name is mapped.
result = [_glyphComponentToUnicode(c, isZapfDingbats)
for c in components]
return "".join(result)
# 3. Map each component to a character string according to the
# procedure below, and concatenate those strings; the result
# is the character string to which the glyph name is mapped.
result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
return "".join(result)
def _glyphComponentToUnicode(component, isZapfDingbats):
# If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
# and the component is in the ITC Zapf Dingbats Glyph List, then
# map it to the corresponding character in that list.
dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
if dingbat:
return dingbat
# If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
# and the component is in the ITC Zapf Dingbats Glyph List, then
# map it to the corresponding character in that list.
dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
if dingbat:
return dingbat
# Otherwise, if the component is in AGL, then map it
# to the corresponding character in that list.
uchars = LEGACY_AGL2UV.get(component)
if uchars:
return "".join(map(chr, uchars))
# Otherwise, if the component is in AGL, then map it
# to the corresponding character in that list.
uchars = LEGACY_AGL2UV.get(component)
if uchars:
return "".join(map(chr, uchars))
# Otherwise, if the component is of the form "uni" (U+0075,
# U+006E, and U+0069) followed by a sequence of uppercase
# hexadecimal digits (09 and AF, meaning U+0030 through
# U+0039 and U+0041 through U+0046), if the length of that
# sequence is a multiple of four, and if each group of four
# digits represents a value in the ranges 0000 through D7FF
# or E000 through FFFF, then interpret each as a Unicode scalar
# value and map the component to the string made of those
# scalar values. Note that the range and digit-length
# restrictions mean that the "uni" glyph name prefix can be
# used only with UVs in the Basic Multilingual Plane (BMP).
uni = _uniToUnicode(component)
if uni:
return uni
# Otherwise, if the component is of the form "uni" (U+0075,
# U+006E, and U+0069) followed by a sequence of uppercase
# hexadecimal digits (09 and AF, meaning U+0030 through
# U+0039 and U+0041 through U+0046), if the length of that
# sequence is a multiple of four, and if each group of four
# digits represents a value in the ranges 0000 through D7FF
# or E000 through FFFF, then interpret each as a Unicode scalar
# value and map the component to the string made of those
# scalar values. Note that the range and digit-length
# restrictions mean that the "uni" glyph name prefix can be
# used only with UVs in the Basic Multilingual Plane (BMP).
uni = _uniToUnicode(component)
if uni:
return uni
# Otherwise, if the component is of the form "u" (U+0075)
# followed by a sequence of four to six uppercase hexadecimal
# digits (09 and AF, meaning U+0030 through U+0039 and
# U+0041 through U+0046), and those digits represents a value
# in the ranges 0000 through D7FF or E000 through 10FFFF, then
# interpret it as a Unicode scalar value and map the component
# to the string made of this scalar value.
uni = _uToUnicode(component)
if uni:
return uni
# Otherwise, if the component is of the form "u" (U+0075)
# followed by a sequence of four to six uppercase hexadecimal
# digits (09 and AF, meaning U+0030 through U+0039 and
# U+0041 through U+0046), and those digits represents a value
# in the ranges 0000 through D7FF or E000 through 10FFFF, then
# interpret it as a Unicode scalar value and map the component
# to the string made of this scalar value.
uni = _uToUnicode(component)
if uni:
return uni
# Otherwise, map the component to an empty string.
return ''
# Otherwise, map the component to an empty string.
return ""
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
_AGL_ZAPF_DINGBATS = (
" ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
"❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
"①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰")
" ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
"❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
"①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
)
def _zapfDingbatsToUnicode(glyph):
"""Helper for toUnicode()."""
if len(glyph) < 2 or glyph[0] != 'a':
return None
try:
gid = int(glyph[1:])
except ValueError:
return None
if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
return None
uchar = _AGL_ZAPF_DINGBATS[gid]
return uchar if uchar != ' ' else None
"""Helper for toUnicode()."""
if len(glyph) < 2 or glyph[0] != "a":
return None
try:
gid = int(glyph[1:])
except ValueError:
return None
if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
return None
uchar = _AGL_ZAPF_DINGBATS[gid]
return uchar if uchar != " " else None
_re_uni = re.compile("^uni([0-9A-F]+)$")
def _uniToUnicode(component):
"""Helper for toUnicode() to handle "uniABCD" components."""
match = _re_uni.match(component)
if match is None:
return None
digits = match.group(1)
if len(digits) % 4 != 0:
return None
chars = [int(digits[i : i + 4], 16)
for i in range(0, len(digits), 4)]
if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
# The AGL specification explicitly excluded surrogate pairs.
return None
return ''.join([chr(c) for c in chars])
"""Helper for toUnicode() to handle "uniABCD" components."""
match = _re_uni.match(component)
if match is None:
return None
digits = match.group(1)
if len(digits) % 4 != 0:
return None
chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
# The AGL specification explicitly excluded surrogate pairs.
return None
return "".join([chr(c) for c in chars])
_re_u = re.compile("^u([0-9A-F]{4,6})$")
def _uToUnicode(component):
"""Helper for toUnicode() to handle "u1ABCD" components."""
match = _re_u.match(component)
if match is None:
return None
digits = match.group(1)
try:
value = int(digits, 16)
except ValueError:
return None
if ((value >= 0x0000 and value <= 0xD7FF) or
(value >= 0xE000 and value <= 0x10FFFF)):
return chr(value)
return None
"""Helper for toUnicode() to handle "u1ABCD" components."""
match = _re_u.match(component)
if match is None:
return None
digits = match.group(1)
try:
value = int(digits, 16)
except ValueError:
return None
if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
return chr(value)
return None

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -14,170 +14,196 @@ from functools import reduce
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
    """Return the running accumulation of ``f`` over its integer-key domain.

    f: dict mapping integer keys to values.
    op: binary accumulation operator (e.g. ``add`` or ``max``).
    start: initial accumulator value.
    decreasing: accumulate from the largest key downward when True.

    The result is a missingdict: keys outside the accumulated domain fall
    back to ``start`` (before the domain) or the grand total (past it),
    direction-dependent.
    """
    keys = sorted(f.keys())
    minx, maxx = keys[0], keys[-1]

    total = reduce(op, f.values(), start)

    if decreasing:
        missing = lambda x: start if x > maxx else total
        domain = range(maxx, minx - 1, -1)
    else:
        missing = lambda x: start if x < minx else total
        domain = range(minx, maxx + 1)

    out = missingdict(missing)

    v = start
    for x in domain:
        v = op(v, f[x])
        out[x] = v

    return out
def byteCost(widths, default, nominal):
    """Return the total byte cost of encoding ``widths``.

    widths: list of glyph widths, or dict mapping width -> frequency.
    default: the defaultWidthX candidate (glyphs at this width cost 0
        bytes); pass None to disable the default-width shortcut.
    nominal: the nominalWidthX candidate deltas are measured against.
    """
    if not hasattr(widths, "items"):
        # Collapse a raw width list into a width -> frequency histogram.
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    cost = 0
    for w, freq in widths.items():
        if w == default:
            continue
        diff = abs(w - nominal)
        # Cost tiers: 1 byte within 107 of nominal, 2 bytes within 1131,
        # otherwise 5 bytes (matches CFF operand number encoding).
        if diff <= 107:
            cost += freq
        elif diff <= 1131:
            cost += freq * 2
        else:
            cost += freq * 5
    return cost
def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts.

    Returns (bestDefault, bestNominal) minimizing byteCost over the full
    [min(widths), max(widths)] domain.
    """
    d = defaultdict(int)
    for w in widths:
        d[w] += 1

    # Maximum number of bytes using default can possibly save.
    maxDefaultAdvantage = 5 * max(d.values())

    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))

    bestCost = len(widths) * 5 + 1
    for nominal in domain:
        # Skip nominals that cannot beat the current best even with the
        # most favorable default choice.
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal

    return bestDefault, bestNominal
def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.

    This algorithm is linear in UPEM+numGlyphs."""

    if not hasattr(widths, "items"):
        # Collapse a raw width list into a width -> frequency histogram.
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))

    # Cumulative sum/max forward/backward.
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)

    # Cost per nominal choice, without default consideration.
    nomnCostU = missingdict(
        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
    )
    nomnCostD = missingdict(
        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
    )
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])

    # Cost-saving per nominal choice, by best default choice.
    dfltCostU = missingdict(
        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
    )
    dfltCostD = missingdict(
        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
    )
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))

    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])

    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])

    # Work back the best default.
    bestC = bestCost[nominal]
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        starts = [nominal, nominal - 108, nominal - 1132]
        for start in starts:
            # Walk left across the plateau of equal cumulative maxima.
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        starts = [nominal, nominal + 108, nominal + 1132]
        for start in starts:
            # Walk right across the plateau of equal cumulative maxima.
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))

    return default, nominal
def main(args=None):
    """Calculate optimum defaultWidthX/nominalWidthX values"""

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.width",
        description=main.__doc__,
    )
    parser.add_argument(
        "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
    )
    parser.add_argument(
        "-b",
        "--brute-force",
        dest="brute",
        action="store_true",
        help="Use brute-force approach (VERY slow)",
    )

    args = parser.parse_args(args)

    for fontfile in args.inputs:
        font = TTFont(fontfile)
        hmtx = font["hmtx"]
        widths = [m[0] for m in hmtx.metrics.values()]
        if args.brute:
            default, nominal = optimizeWidthsBruteforce(widths)
        else:
            default, nominal = optimizeWidths(widths)
        print(
            "glyphs=%d default=%d nominal=%d byteCost=%d"
            % (len(widths), default, nominal, byteCost(widths, default, nominal))
        )


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # With no arguments, run the module doctests instead of processing fonts.
        import doctest

        sys.exit(doctest.testmod().failed)
    main()

View File

@ -1,3 +1,2 @@
class ColorLibError(Exception):
    """Exception type raised for colorLib errors."""

View File

@ -67,9 +67,7 @@ def _split_format(cls, source):
assert isinstance(
fmt, collections.abc.Hashable
), f"{cls} Format is not hashable: {fmt!r}"
assert (
fmt in cls.convertersByName
), f"{cls} invalid Format: {fmt!r}"
assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
return fmt, remainder

View File

@ -6,44 +6,52 @@ import timeit
MAX_ERR = 5
def generate_curve():
    """Return a random cubic curve: four (x, y) tuples of floats in [0, 2048]."""
    return [
        tuple(float(random.randint(0, 2048)) for coord in range(2))
        for point in range(4)
    ]
def setup_curve_to_quadratic():
    """Build the (curve, max_err) arguments for a curve_to_quadratic call."""
    curve = generate_curve()
    return curve, MAX_ERR
def setup_curves_to_quadratic():
    """Build the (curves, max_errors) arguments for a curves_to_quadratic call."""
    num_curves = 3
    return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
def run_benchmark(
    benchmark_module, module, function, setup_suffix="", repeat=5, number=1000
):
    """Time ``function`` with arguments built by its matching setup_* helper.

    Both names are resolved in this module's globals(); the minimum of
    ``repeat`` runs of ``number`` calls is printed in microseconds.
    """
    setup_func = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_func += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    def wrapper(function, setup_func):
        # Resolve both names once; each timed call regenerates fresh
        # arguments via setup_func().
        function = globals()[function]
        setup_func = globals()[setup_func]

        def wrapped():
            return function(*setup_func())

        return wrapped

    results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))
def main():
    """Benchmark the cu2qu algorithm performance."""
    run_benchmark("cu2qu.benchmark", "cu2qu", "curve_to_quadratic")
    run_benchmark("cu2qu.benchmark", "cu2qu", "curves_to_quadratic")
if __name__ == "__main__":
    # Fixed seed so benchmark inputs are reproducible across runs.
    random.seed(1)
    main()

View File

@ -37,7 +37,7 @@ def open_ufo(path):
def _font_to_quadratic(input_path, output_path=None, **kwargs):
ufo = open_ufo(input_path)
logger.info('Converting curves for %s', input_path)
logger.info("Converting curves for %s", input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
if output_path:
@ -67,13 +67,13 @@ def _copytree(input_path, output_path):
def main(args=None):
"""Convert a UFO font from cubic to quadratic curves"""
parser = argparse.ArgumentParser(prog="cu2qu")
parser.add_argument(
"--version", action="version", version=fontTools.__version__)
parser.add_argument("--version", action="version", version=fontTools.__version__)
parser.add_argument(
"infiles",
nargs="+",
metavar="INPUT",
help="one or more input UFO source file(s).")
help="one or more input UFO source file(s).",
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-e",
@ -81,19 +81,21 @@ def main(args=None):
type=float,
metavar="ERROR",
default=None,
help="maxiumum approximation error measured in EM (default: 0.001)")
help="maxiumum approximation error measured in EM (default: 0.001)",
)
parser.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
help="do not reverse the contour direction")
help="do not reverse the contour direction",
)
mode_parser = parser.add_mutually_exclusive_group()
mode_parser.add_argument(
"-i",
"--interpolatable",
action="store_true",
help="whether curve conversion should keep interpolation compatibility"
help="whether curve conversion should keep interpolation compatibility",
)
mode_parser.add_argument(
"-j",
@ -103,7 +105,8 @@ def main(args=None):
default=1,
const=_cpu_count(),
metavar="N",
help="Convert using N multiple processes (default: %(default)s)")
help="Convert using N multiple processes (default: %(default)s)",
)
output_parser = parser.add_mutually_exclusive_group()
output_parser.add_argument(
@ -111,14 +114,18 @@ def main(args=None):
"--output-file",
default=None,
metavar="OUTPUT",
help=("output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."))
help=(
"output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."
),
)
output_parser.add_argument(
"-d",
"--output-dir",
default=None,
metavar="DIRECTORY",
help="output directory where to save converted UFOs")
help="output directory where to save converted UFOs",
)
options = parser.parse_args(args)
@ -143,8 +150,7 @@ def main(args=None):
elif not os.path.isdir(output_dir):
parser.error("'%s' is not a directory" % output_dir)
output_paths = [
os.path.join(output_dir, os.path.basename(p))
for p in options.infiles
os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
]
elif options.output_file:
output_paths = [options.output_file]
@ -152,12 +158,14 @@ def main(args=None):
# save in-place
output_paths = [None] * len(options.infiles)
kwargs = dict(dump_stats=options.verbose > 0,
max_err_em=options.conversion_error,
reverse_direction=options.reverse_direction)
kwargs = dict(
dump_stats=options.verbose > 0,
max_err_em=options.conversion_error,
reverse_direction=options.reverse_direction,
)
if options.interpolatable:
logger.info('Converting curves compatibly')
logger.info("Converting curves compatibly")
ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
@ -171,11 +179,10 @@ def main(args=None):
if output_path:
_copytree(input_path, output_path)
else:
jobs = min(len(options.infiles),
options.jobs) if options.jobs > 1 else 1
jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
if jobs > 1:
func = partial(_font_to_quadratic, **kwargs)
logger.info('Running %d parallel processes', jobs)
logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths))
else:

View File

@ -1,5 +1,5 @@
#cython: language_level=3
#distutils: define_macros=CYTHON_TRACE_NOGIL=1
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved.
#
@ -26,7 +26,7 @@ import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ['curve_to_quadratic', 'curves_to_quadratic']
__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
@ -61,7 +61,9 @@ def dot(v1, v2):
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex)
@cython.locals(
_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
_1 = d
_2 = (c / 3.0) + d
@ -72,7 +74,9 @@ def calc_cubic_points(a, b, c, d):
@cython.cfunc
@cython.inline
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0
@ -83,7 +87,9 @@ def calc_cubic_parameters(p0, p1, p2, p3):
@cython.cfunc
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts.
@ -112,13 +118,23 @@ def split_cubic_into_n_iter(p0, p1, p2, p3, n):
a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(split_cubic_into_three(*a) + split_cubic_into_three(*b))
return _split_cubic_into_n_gen(p0,p1,p2,p3,n)
return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int)
@cython.locals(a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex)
@cython.locals(
dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n
@ -129,13 +145,15 @@ def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
t1_2 = t1 * t1
# calc new a, b, c and d
a1 = a * delta_3
b1 = (3*a*t1 + b) * delta_2
c1 = (2*b*t1 + c + 3*a*t1_2) * dt
d1 = a*t1*t1_2 + b*t1_2 + c*t1 + d
b1 = (3 * a * t1 + b) * delta_2
c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
yield calc_cubic_points(a1, b1, c1, d1)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts.
@ -152,15 +170,28 @@ def split_cubic_into_two(p0, p1, p2, p3):
tuple: Two cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid = (p0 + 3 * (p1 + p2) + p3) * .125
deriv3 = (p3 + p2 - p1 - p0) * .125
return ((p0, (p0 + p1) * .5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * .5, p3))
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return (
(p0, (p0 + p1) * 0.5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double)
@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex)
def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
_27=cython.double,
)
@cython.locals(
mid1=cython.complex,
deriv1=cython.complex,
mid2=cython.complex,
deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3, _27=1 / 27):
"""Split a cubic Bezier into three equal parts.
Splits the curve into three equal parts at t = 1/3 and t = 2/3
@ -177,17 +208,25 @@ def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
"""
# we define 1/27 as a keyword argument so that it will be evaluated only
# once but still in the scope of this function
mid1 = (8*p0 + 12*p1 + 6*p2 + p3) * _27
deriv1 = (p3 + 3*p2 - 4*p0) * _27
mid2 = (p0 + 6*p1 + 12*p2 + 8*p3) * _27
deriv2 = (4*p3 - 3*p1 - p0) * _27
return ((p0, (2*p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2*p3) / 3.0, p3))
mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * _27
deriv1 = (p3 + 3 * p2 - 4 * p0) * _27
mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * _27
deriv2 = (4 * p3 - 3 * p1 - p0) * _27
return (
(p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
)
@cython.returns(cython.complex)
@cython.locals(t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
t=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one.
@ -235,7 +274,13 @@ def calc_intersect(a, b, c, d):
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex)
@cython.locals(
tolerance=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin.
@ -260,18 +305,25 @@ def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
return True
# Split.
mid = (p0 + 3 * (p1 + p2) + p3) * .125
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance:
return False
deriv3 = (p3 + p2 - p1 - p0) * .125
return (cubic_farthest_fit_inside(p0, (p0+p1)*.5, mid-deriv3, mid, tolerance) and
cubic_farthest_fit_inside(mid, mid+deriv3, (p2+p3)*.5, p3, tolerance))
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return cubic_farthest_fit_inside(
p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
@cython.locals(tolerance=cython.double, _2_3=cython.double)
@cython.locals(q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
@cython.locals(
q1=cython.complex,
c0=cython.complex,
c1=cython.complex,
c2=cython.complex,
c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance, _2_3=2 / 3):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance.
Args:
@ -294,10 +346,7 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
c3 = cubic[3]
c1 = c0 + (q1 - c0) * _2_3
c2 = c3 + (q1 - c3) * _2_3
if not cubic_farthest_fit_inside(0,
c1 - cubic[1],
c2 - cubic[2],
0, tolerance):
if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
return None
return c0, q1, c3
@ -305,9 +354,17 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double)
@cython.locals(i=cython.int)
@cython.locals(c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex)
@cython.locals(q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex)
def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
@cython.locals(
c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
q0=cython.complex,
q1=cython.complex,
next_q1=cython.complex,
q2=cython.complex,
d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, _2_3=2 / 3):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
@ -335,7 +392,7 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
q2 = cubic[0]
d1 = 0j
spline = [cubic[0], next_q1]
for i in range(1, n+1):
for i in range(1, n + 1):
# Current cubic to convert
c0, c1, c2, c3 = next_cubic
@ -345,9 +402,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
q1 = next_q1
if i < n:
next_cubic = next(cubics)
next_q1 = cubic_approx_control(i / (n-1), *next_cubic)
next_q1 = cubic_approx_control(i / (n - 1), *next_cubic)
spline.append(next_q1)
q2 = (q1 + next_q1) * .5
q2 = (q1 + next_q1) * 0.5
else:
q2 = c3
@ -355,12 +412,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
d0 = d1
d1 = q2 - c3
if (abs(d1) > tolerance or
not cubic_farthest_fit_inside(d0,
q0 + (q1 - q0) * _2_3 - c1,
q2 + (q1 - q2) * _2_3 - c2,
d1,
tolerance)):
if abs(d1) > tolerance or not cubic_farthest_fit_inside(
d0, q0 + (q1 - q0) * _2_3 - c1, q2 + (q1 - q2) * _2_3 - c2, d1, tolerance
):
return None
spline.append(cubic[3])
@ -394,7 +448,6 @@ def curve_to_quadratic(curve, max_err):
raise ApproxNotFoundError(curve)
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
def curves_to_quadratic(curves, max_errors):
"""Return quadratic Bezier splines approximating the input cubic Beziers.
@ -448,5 +501,3 @@ def curves_to_quadratic(curves, max_errors):
return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves)

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""

View File

@ -30,12 +30,15 @@ from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
UnequalZipLengthsError, IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError, IncompatibleGlyphsError,
IncompatibleFontsError)
UnequalZipLengthsError,
IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ['fonts_to_quadratic', 'font_to_quadratic']
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
@ -47,6 +50,8 @@ logger = logging.getLogger(__name__)
_zip = zip
def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility.
@ -69,27 +74,27 @@ class GetSegmentsPen(AbstractPen):
self.segments = []
def _add_segment(self, tag, *args):
if tag in ['move', 'line', 'qcurve', 'curve']:
if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1]
self.segments.append((tag, args))
def moveTo(self, pt):
self._add_segment('move', pt)
self._add_segment("move", pt)
def lineTo(self, pt):
self._add_segment('line', pt)
self._add_segment("line", pt)
def qCurveTo(self, *points):
self._add_segment('qcurve', self._last_pt, *points)
self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points):
self._add_segment('curve', self._last_pt, *points)
self._add_segment("curve", self._last_pt, *points)
def closePath(self):
self._add_segment('close')
self._add_segment("close")
def endPath(self):
self._add_segment('end')
self._add_segment("end")
    def addComponent(self, glyphName, transformation):
        # Components are deliberately skipped; this pen only collects
        # outline segment data.
        pass
@ -122,17 +127,17 @@ def _set_segments(glyph, segments, reverse_direction):
if reverse_direction:
pen = ReverseContourPen(pen)
for tag, args in segments:
if tag == 'move':
if tag == "move":
pen.moveTo(*args)
elif tag == 'line':
elif tag == "line":
pen.lineTo(*args)
elif tag == 'curve':
elif tag == "curve":
pen.curveTo(*args[1:])
elif tag == 'qcurve':
elif tag == "qcurve":
pen.qCurveTo(*args[1:])
elif tag == 'close':
elif tag == "close":
pen.closePath()
elif tag == 'end':
elif tag == "end":
pen.endPath()
else:
raise AssertionError('Unhandled segment type "%s"' % tag)
@ -141,16 +146,16 @@ def _set_segments(glyph, segments, reverse_direction):
def _segments_to_quadratic(segments, max_err, stats):
    """Return quadratic approximations of cubic segments.

    Each input segment must be a ("curve", points) tuple; the result is a
    parallel list of ("qcurve", points) tuples. ``stats`` is updated in
    place with a tally keyed by resulting spline length.
    """
    assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"

    new_points = curves_to_quadratic([s[1] for s in segments], max_err)
    n = len(new_points[0])
    assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"

    spline_length = str(n - 2)
    stats[spline_length] = stats.get(spline_length, 0) + 1

    return [("qcurve", p) for p in new_points]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
@ -176,7 +181,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments]
elif tag == 'curve':
elif tag == "curve":
segments = _segments_to_quadratic(segments, max_err, stats)
glyphs_modified = True
new_segments_by_location.append(segments)
@ -191,8 +196,7 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
return glyphs_modified
def glyphs_to_quadratic(
glyphs, max_err=None, reverse_direction=False, stats=None):
def glyphs_to_quadratic(glyphs, max_err=None, reverse_direction=False, stats=None):
"""Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@ -220,8 +224,14 @@ def glyphs_to_quadratic(
def fonts_to_quadratic(
fonts, max_err_em=None, max_err=None, reverse_direction=False,
stats=None, dump_stats=False, remember_curve_type=True):
fonts,
max_err_em=None,
max_err=None,
reverse_direction=False,
stats=None,
dump_stats=False,
remember_curve_type=True,
):
"""Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
@ -258,7 +268,7 @@ def fonts_to_quadratic(
stats = {}
if max_err_em and max_err:
raise TypeError('Only one of max_err and max_err_em can be specified.')
raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR
@ -270,8 +280,7 @@ def fonts_to_quadratic(
if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em)
max_errors = [f.info.unitsPerEm * e
for f, e in zip(fonts, max_err_em)]
max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
@ -286,7 +295,8 @@ def fonts_to_quadratic(
cur_max_errors.append(error)
try:
modified |= _glyphs_to_quadratic(
glyphs, cur_max_errors, reverse_direction, stats)
glyphs, cur_max_errors, reverse_direction, stats
)
except IncompatibleGlyphsError as exc:
logger.error(exc)
glyph_errors[name] = exc
@ -296,8 +306,10 @@ def fonts_to_quadratic(
if modified and dump_stats:
spline_lengths = sorted(stats.keys())
logger.info('New spline lengths: %s' % (', '.join(
'%s: %d' % (l, stats[l]) for l in spline_lengths)))
logger.info(
"New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
)
if remember_curve_type:
for font in fonts:

File diff suppressed because it is too large Load Diff

View File

@ -1,36 +1,258 @@
# Glyph names for the 256 byte codes of the Mac Roman encoding:
# index == byte value, 256 entries total.
# fmt: off
MacRoman = [
    "NUL", "Eth", "eth", "Lslash", "lslash", "Scaron", "scaron", "Yacute",  # 0x00
    "yacute", "HT", "LF", "Thorn", "thorn", "CR", "Zcaron", "zcaron",  # 0x08
    "DLE", "DC1", "DC2", "DC3", "DC4", "onehalf", "onequarter", "onesuperior",  # 0x10
    "threequarters", "threesuperior", "twosuperior", "brokenbar", "minus", "multiply", "RS", "US",  # 0x18
    "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quotesingle",  # 0x20
    "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash",  # 0x28
    "zero", "one", "two", "three", "four", "five", "six", "seven",  # 0x30
    "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question",  # 0x38
    "at", "A", "B", "C", "D", "E", "F", "G",  # 0x40
    "H", "I", "J", "K", "L", "M", "N", "O",  # 0x48
    "P", "Q", "R", "S", "T", "U", "V", "W",  # 0x50
    "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",  # 0x58
    "grave", "a", "b", "c", "d", "e", "f", "g",  # 0x60
    "h", "i", "j", "k", "l", "m", "n", "o",  # 0x68
    "p", "q", "r", "s", "t", "u", "v", "w",  # 0x70
    "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "DEL",  # 0x78
    "Adieresis", "Aring", "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute",  # 0x80
    "agrave", "acircumflex", "adieresis", "atilde", "aring", "ccedilla", "eacute", "egrave",  # 0x88
    "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", "idieresis", "ntilde", "oacute",  # 0x90
    "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis",  # 0x98
    "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls",  # 0xA0
    "registered", "copyright", "trademark", "acute", "dieresis", "notequal", "AE", "Oslash",  # 0xA8
    "infinity", "plusminus", "lessequal", "greaterequal", "yen", "mu", "partialdiff", "summation",  # 0xB0
    "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", "ae", "oslash",  # 0xB8
    "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft",  # 0xC0
    "guillemotright", "ellipsis", "nbspace", "Agrave", "Atilde", "Otilde", "OE", "oe",  # 0xC8
    "endash", "emdash", "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge",  # 0xD0
    "ydieresis", "Ydieresis", "fraction", "currency", "guilsinglleft", "guilsinglright", "fi", "fl",  # 0xD8
    "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", "Ecircumflex", "Aacute",  # 0xE0
    "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",  # 0xE8
    "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde",  # 0xF0
    "macron", "breve", "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron",  # 0xF8
]
# fmt: on

View File

@ -1,48 +1,258 @@
# Glyph names for the 256 codes of the Adobe Standard encoding:
# index == code value, 256 entries total; unassigned codes are ".notdef".
# fmt: off
StandardEncoding = [
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x00
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x08
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x10
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x18
    "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright",  # 0x20
    "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash",  # 0x28
    "zero", "one", "two", "three", "four", "five", "six", "seven",  # 0x30
    "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question",  # 0x38
    "at", "A", "B", "C", "D", "E", "F", "G",  # 0x40
    "H", "I", "J", "K", "L", "M", "N", "O",  # 0x48
    "P", "Q", "R", "S", "T", "U", "V", "W",  # 0x50
    "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore",  # 0x58
    "quoteleft", "a", "b", "c", "d", "e", "f", "g",  # 0x60
    "h", "i", "j", "k", "l", "m", "n", "o",  # 0x68
    "p", "q", "r", "s", "t", "u", "v", "w",  # 0x70
    "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", ".notdef",  # 0x78
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x80
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x88
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x90
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0x98
    ".notdef", "exclamdown", "cent", "sterling", "fraction", "yen", "florin", "section",  # 0xA0
    "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",  # 0xA8
    ".notdef", "endash", "dagger", "daggerdbl", "periodcentered", ".notdef", "paragraph", "bullet",  # 0xB0
    "quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", "perthousand", ".notdef", "questiondown",  # 0xB8
    ".notdef", "grave", "acute", "circumflex", "tilde", "macron", "breve", "dotaccent",  # 0xC0
    "dieresis", ".notdef", "ring", "cedilla", ".notdef", "hungarumlaut", "ogonek", "caron",  # 0xC8
    "emdash", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0xD0
    ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef",  # 0xD8
    ".notdef", "AE", ".notdef", "ordfeminine", ".notdef", ".notdef", ".notdef", ".notdef",  # 0xE0
    "Lslash", "Oslash", "OE", "ordmasculine", ".notdef", ".notdef", ".notdef", ".notdef",  # 0xE8
    ".notdef", "ae", ".notdef", ".notdef", ".notdef", "dotlessi", ".notdef", ".notdef",  # 0xF0
    "lslash", "oslash", "oe", "germandbls", ".notdef", ".notdef", ".notdef", ".notdef",  # 0xF8
]
# fmt: on

View File

@ -4,116 +4,132 @@ but missing from Python. See https://github.com/fonttools/fonttools/issues/236
import codecs
import encodings
class ExtendCodec(codecs.Codec):
    """A codec that wraps a base encoding and extends it with extra mappings.

    ``mapping`` maps byte strings to the unicode strings they decode to; the
    inverse mapping is used for encoding.  The codec registers an error
    handler under ``name`` so that characters/bytes the base codec cannot
    handle are resolved through the mapping instead.
    """

    def __init__(self, name, base_encoding, mapping):
        self.name = name
        self.base_encoding = base_encoding
        self.mapping = mapping
        # Inverse table for encoding: unicode string -> byte string.
        self.reverse = {v: k for k, v in mapping.items()}
        # Longest unicode replacement; bounds the lookahead in error().
        self.max_len = max(len(v) for v in mapping.values())
        self.info = codecs.CodecInfo(
            name=self.name, encode=self.encode, decode=self.decode
        )
        codecs.register_error(name, self.error)

    def _map(self, mapper, output_type, exc_type, input, errors):
        """Run ``mapper`` over ``input``, falling back to the caller's error
        handler for anything our own mapping cannot resolve either."""
        base_error_handler = codecs.lookup_error(errors)
        length = len(input)
        out = output_type()
        while input:
            # first try to use self.error as the error handler
            try:
                part = mapper(input, self.base_encoding, errors=self.name)
                out += part
                break  # All converted
            except exc_type as e:
                # else convert the correct part, handle error as requested and continue
                out += mapper(input[: e.start], self.base_encoding, self.name)
                replacement, pos = base_error_handler(e)
                out += replacement
                input = input[pos:]
        return out, length

    def encode(self, input, errors="strict"):
        return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)

    def decode(self, input, errors="strict"):
        return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)

    def error(self, e):
        """Error handler: resolve the failing span through our mapping,
        re-raising (with this codec's name) when it is not covered."""
        if isinstance(e, UnicodeDecodeError):
            for end in range(e.start + 1, e.end + 1):
                s = e.object[e.start : end]
                if s in self.mapping:
                    return self.mapping[s], end
        elif isinstance(e, UnicodeEncodeError):
            for end in range(e.start + 1, e.start + self.max_len + 1):
                s = e.object[e.start : end]
                if s in self.reverse:
                    return self.reverse[s], end
        e.encoding = self.name
        raise e
_extended_encodings = {
"x_mac_japanese_ttx": ("shift_jis", {
b"\xFC": chr(0x007C),
b"\x7E": chr(0x007E),
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_trad_chinese_ttx": ("big5", {
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_korean_ttx": ("euc_kr", {
b"\x80": chr(0x00A0),
b"\x81": chr(0x20A9),
b"\x82": chr(0x2014),
b"\x83": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_simp_chinese_ttx": ("gb2312", {
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
}),
"x_mac_japanese_ttx": (
"shift_jis",
{
b"\xFC": chr(0x007C),
b"\x7E": chr(0x007E),
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_trad_chinese_ttx": (
"big5",
{
b"\x80": chr(0x005C),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_korean_ttx": (
"euc_kr",
{
b"\x80": chr(0x00A0),
b"\x81": chr(0x20A9),
b"\x82": chr(0x2014),
b"\x83": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
"x_mac_simp_chinese_ttx": (
"gb2312",
{
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
}
# Lazily-built cache of ExtendCodec instances, keyed by normalized name.
_cache = {}


def search_function(name):
    """Codec search function returning a CodecInfo for our "..._ttx" names.

    Returns None for any name we do not implement, as required by the
    ``codecs.register`` protocol.
    """
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name in _extended_encodings:
        if name not in _cache:
            base_encoding, mapping = _extended_encodings[name]
            assert name[-4:] == "_ttx"
            # Python 2 didn't have any of the encodings that we are implementing
            # in this file.  Python 3 added aliases for the East Asian ones, mapping
            # them "temporarily" to the same base encoding as us, with a comment
            # suggesting that full implementation will appear some time later.
            # As such, try the Python version of the x_mac_... first, if that is found,
            # use *that* as our base encoding.  This would make our encoding upgrade
            # to the full encoding when and if Python finally implements that.
            # http://bugs.python.org/issue24041
            base_encodings = [name[:-4], base_encoding]
            for base_encoding in base_encodings:
                try:
                    codecs.lookup(base_encoding)
                except LookupError:
                    continue
                _cache[name] = ExtendCodec(name, base_encoding, mapping)
                break
        return _cache[name].info
    return None


codecs.register(search_function)

View File

@ -768,8 +768,8 @@ class Builder(object):
varidx_map = store.optimize()
gdef.remap_device_varidxes(varidx_map)
if 'GPOS' in self.font:
self.font['GPOS'].table.remap_device_varidxes(varidx_map)
if "GPOS" in self.font:
self.font["GPOS"].table.remap_device_varidxes(varidx_map)
VariableScalar.clear_cache()
if any(
(
@ -1339,7 +1339,9 @@ class Builder(object):
# GSUB 5/6
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location)
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
lookup = self.get_lookup_(location, ChainContextSubstBuilder)
lookup.rules.append(
ChainContextualRule(
@ -1349,7 +1351,9 @@ class Builder(object):
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
if not mapping or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location)
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
# https://github.com/fonttools/fonttools/issues/512
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_single_subst(set(mapping.keys()))
@ -1377,8 +1381,12 @@ class Builder(object):
lookup = self.get_lookup_(location, SinglePosBuilder)
for glyphs, value in pos:
if not glyphs:
raise FeatureLibError("Empty glyph class in positioning rule", location)
otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
raise FeatureLibError(
"Empty glyph class in positioning rule", location
)
otValueRecord = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
for glyph in glyphs:
try:
lookup.add_pos(location, glyph, otValueRecord)
@ -1388,9 +1396,7 @@ class Builder(object):
# GPOS 2
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
if not glyphclass1 or not glyphclass2:
raise FeatureLibError(
"Empty glyph class in positioning rule", location
)
raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
@ -1458,7 +1464,9 @@ class Builder(object):
# GPOS 7/8
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
lookup = self.get_lookup_(location, ChainContextPosBuilder)
lookup.rules.append(
ChainContextualRule(
@ -1468,7 +1476,9 @@ class Builder(object):
def add_single_pos_chained_(self, location, prefix, suffix, pos):
if not pos or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location)
raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
# https://github.com/fonttools/fonttools/issues/514
chain = self.get_lookup_(location, ChainContextPosBuilder)
targets = []
@ -1479,7 +1489,9 @@ class Builder(object):
if value is None:
subs.append(None)
continue
otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False)
otValue = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
if sub is None:
sub = self.get_chained_lookup_(location, SinglePosBuilder)
@ -1498,7 +1510,9 @@ class Builder(object):
for markClassDef in markClass.definitions:
for mark in markClassDef.glyphs.glyphSet():
if mark not in lookupBuilder.marks:
otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor)
otMarkAnchor = self.makeOpenTypeAnchor(
location, markClassDef.anchor
)
lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
else:
existingMarkClass = lookupBuilder.marks[mark][0]
@ -1591,11 +1605,15 @@ class Builder(object):
for dim in ("x", "y"):
if not isinstance(getattr(anchor, dim), VariableScalar):
continue
if getattr(anchor, dim+"DeviceTable") is not None:
raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
if getattr(anchor, dim + "DeviceTable") is not None:
raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
varscalar = getattr(anchor,dim)
raise FeatureLibError(
"Can't define a variable scalar in a non-variable font", location
)
varscalar = getattr(anchor, dim)
varscalar.axes = self.axes
default, index = varscalar.add_to_variation_store(self.varstorebuilder)
setattr(anchor, dim, default)
@ -1606,7 +1624,9 @@ class Builder(object):
deviceY = buildVarDevTable(index)
variable = True
otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
otlanchor = otl.buildAnchor(
anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY
)
if variable:
otlanchor.Format = 3
return otlanchor
@ -1617,7 +1637,6 @@ class Builder(object):
if not name.startswith("Reserved")
}
def makeOpenTypeValueRecord(self, location, v, pairPosContext):
"""ast.ValueRecord --> otBase.ValueRecord"""
if not v:
@ -1635,9 +1654,14 @@ class Builder(object):
otDeviceName = otName[0:4] + "Device"
feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
if getattr(v, feaDeviceName):
raise FeatureLibError("Can't define a device coordinate and variable scalar", location)
raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
raise FeatureLibError(
"Can't define a variable scalar in a non-variable font",
location,
)
val.axes = self.axes
default, index = val.add_to_variation_store(self.varstorebuilder)
vr[otName] = default

View File

@ -197,7 +197,7 @@ class IncludingLexer(object):
"""A Lexer that follows include statements.
The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
order:
1. If the source font is UFO format, then relative to the UFO's

View File

@ -1,7 +1,8 @@
from typing import NamedTuple

# Key under which lookup-origin debug info is stored in the font's lib.
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
# Environment variable that enables lookup debugging when set.
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
class LookupDebugInfo(NamedTuple):
"""Information about where a lookup came from, to be embedded in a font"""

View File

@ -134,7 +134,8 @@ class Parser(object):
]
raise FeatureLibError(
"The following glyph names are referenced but are missing from the "
"glyph set:\n" + ("\n".join(error)), None
"glyph set:\n" + ("\n".join(error)),
None,
)
return self.doc_
@ -396,7 +397,8 @@ class Parser(object):
self.expect_symbol_("-")
range_end = self.expect_cid_()
self.check_glyph_name_in_glyph_set(
f"cid{range_start:05d}", f"cid{range_end:05d}",
f"cid{range_start:05d}",
f"cid{range_end:05d}",
)
glyphs.add_cid_range(
range_start,
@ -696,7 +698,9 @@ class Parser(object):
location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True)
if not glyphs.glyphSet():
raise FeatureLibError("Empty glyph class in mark class definition", location)
raise FeatureLibError(
"Empty glyph class in mark class definition", location
)
anchor = self.parse_anchor_()
name = self.expect_class_name_()
self.expect_symbol_(";")

View File

@ -4,7 +4,11 @@
from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import (
    computeMegaGlyphOrder,
    computeMegaCmap,
    renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
@ -15,191 +19,193 @@ import logging
log = logging.getLogger("fontTools.merge")

# Timer used to report per-table merge durations at INFO level.
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
class Merger(object):
    """Font merger.

    This class merges multiple files into a single OpenType font, taking into
    account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
    cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
    all the fonts).

    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    height), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
    and a lookup will be inserted into the to ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft#1`` etc.

    Restrictions:

    - All fonts must have the same units per em.
    - If duplicate glyph disambiguation takes place as described above then the
      fonts must have a ``GSUB`` table.

    Attributes:
      options: Currently unused.
    """

    def __init__(self, options=None):
        if not options:
            options = Options()
        self.options = options

    def _openFonts(self, fontfiles):
        # Open each file and tag the TTFont with its origin for diagnostics.
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, fontfile in zip(fonts, fontfiles):
            font._merger__fontfile = fontfile
            font._merger__name = font["name"].getDebugName(4)
        return fonts

    def merge(self, fontfiles):
        """Merges fonts together.

        Args:
            fontfiles: A list of file names to be merged

        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = self._openFonts(fontfiles)
        glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
        computeMegaGlyphOrder(self, glyphOrders)

        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion

        # Reload fonts and set new glyph names on them.
        fonts = self._openFonts(fontfiles)
        for font, glyphOrder in zip(fonts, glyphOrders):
            font.setGlyphOrder(glyphOrder)
            if "CFF " in font:
                renameCFFCharStrings(self, glyphOrder, font["CFF "])

        cmaps = [font["cmap"] for font in fonts]
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        computeMegaCmap(self, cmaps)

        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(self.glyphOrder)

        for font in fonts:
            self._preMerge(font)

        self.fonts = fonts

        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove("GlyphOrder")

        for tag in allTags:
            if tag in self.options.drop_tables:
                continue

            with timer("merge '%s'" % tag):
                tables = [font.get(tag, NotImplemented) for font in fonts]

                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)

                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)

        del self.duplicateGlyphsPerFont
        del self.fonts

        self._postMerge(mega)

        return mega

    def mergeObjects(self, returnTable, logic, tables):
        # Right now we don't use self at all.  Will use in the future
        # for options and logging.

        # Union of attribute names present on any of the (non-missing) tables.
        allKeys = set.union(
            set(),
            *(vars(table).keys() for table in tables if table is not NotImplemented),
        )
        for key in allKeys:
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    # "*" acts as a wildcard merge rule for unlisted keys.
                    mergeLogic = logic["*"]
                except KeyError:
                    raise Exception(
                        "Don't know how to merge key %s of class %s"
                        % (key, returnTable.__class__.__name__)
                    )
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)
        return returnTable

    def _preMerge(self, font):
        layoutPreMerge(font)

    def _postMerge(self, font):
        layoutPostMerge(font)

        if "OS/2" in font:
            # https://github.com/fonttools/fonttools/issues/2538
            # TODO: Add an option to disable this?
            font["OS/2"].recalcAvgCharWidth(font)
__all__ = [
'Options',
'Merger',
'main'
]
__all__ = ["Options", "Merger", "main"]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
"""Merge multiple fonts into one"""
from fontTools import configLogger
"""Merge multiple fonts into one"""
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
if args is None:
args = sys.argv[1:]
options = Options()
args = options.parse_opts(args, ignore_unknown=['output-file'])
outfile = 'merged.ttf'
fontfiles = []
for g in args:
if g.startswith('--output-file='):
outfile = g[14:]
continue
fontfiles.append(g)
options = Options()
args = options.parse_opts(args, ignore_unknown=["output-file"])
outfile = "merged.ttf"
fontfiles = []
for g in args:
if g.startswith("--output-file="):
outfile = g[14:]
continue
fontfiles.append(g)
if len(args) < 1:
print("usage: pyftmerge font...", file=sys.stderr)
return 1
if len(args) < 1:
print("usage: pyftmerge font...", file=sys.stderr)
return 1
configLogger(level=logging.INFO if options.verbose else logging.WARNING)
if options.timing:
timer.logger.setLevel(logging.DEBUG)
else:
timer.logger.disabled = True
configLogger(level=logging.INFO if options.verbose else logging.WARNING)
if options.timing:
timer.logger.setLevel(logging.DEBUG)
else:
timer.logger.disabled = True
merger = Merger(options=options)
font = merger.merge(fontfiles)
with timer("compile and save font"):
font.save(outfile)
merger = Merger(options=options)
font = merger.merge(fontfiles)
with timer("compile and save font"):
font.save(outfile)
if __name__ == "__main__":
sys.exit(main())
sys.exit(main())

View File

@ -2,5 +2,5 @@ import sys
from fontTools.merge import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -10,67 +10,72 @@ log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs):
"""Returns a decorator function that adds a new method to one or
more classes."""
allowDefault = kwargs.get('allowDefaultTable', False)
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done: continue # Support multiple names of a clazz
done.append(clazz)
assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
assert method.__name__ not in clazz.__dict__, \
"Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
"""Returns a decorator function that adds a new method to one or
more classes."""
allowDefault = kwargs.get("allowDefaultTable", False)
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done:
continue # Support multiple names of a clazz
done.append(clazz)
assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
assert (
method.__name__ not in clazz.__dict__
), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def mergeObjects(lst):
lst = [item for item in lst if item is not NotImplemented]
if not lst:
return NotImplemented
lst = [item for item in lst if item is not None]
if not lst:
return None
lst = [item for item in lst if item is not NotImplemented]
if not lst:
return NotImplemented
lst = [item for item in lst if item is not None]
if not lst:
return None
clazz = lst[0].__class__
assert all(type(item) == clazz for item in lst), lst
clazz = lst[0].__class__
assert all(type(item) == clazz for item in lst), lst
logic = clazz.mergeMap
returnTable = clazz()
returnDict = {}
logic = clazz.mergeMap
returnTable = clazz()
returnDict = {}
allKeys = set.union(set(), *(vars(table).keys() for table in lst))
for key in allKeys:
try:
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic['*']
except KeyError:
raise Exception("Don't know how to merge key %s of class %s" %
(key, clazz.__name__))
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
if value is not NotImplemented:
returnDict[key] = value
allKeys = set.union(set(), *(vars(table).keys() for table in lst))
for key in allKeys:
try:
mergeLogic = logic[key]
except KeyError:
try:
mergeLogic = logic["*"]
except KeyError:
raise Exception(
"Don't know how to merge key %s of class %s" % (key, clazz.__name__)
)
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
if value is not NotImplemented:
returnDict[key] = value
returnTable.__dict__ = returnDict
returnTable.__dict__ = returnDict
return returnTable
return returnTable
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
if not hasattr(self, 'mergeMap'):
log.info("Don't know how to merge '%s'.", self.tableTag)
return NotImplemented
logic = self.mergeMap
if isinstance(logic, dict):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)
if not hasattr(self, "mergeMap"):
log.info("Don't know how to merge '%s'.", self.tableTag)
return NotImplemented
logic = self.mergeMap
if isinstance(logic, dict):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)

View File

@ -11,119 +11,131 @@ log = logging.getLogger("fontTools.merge")
def computeMegaGlyphOrder(merger, glyphOrders):
"""Modifies passed-in glyphOrders to reflect new glyph names.
"""Modifies passed-in glyphOrders to reflect new glyph names.
Stores merger.glyphOrder."""
megaOrder = {}
for glyphOrder in glyphOrders:
for i,glyphName in enumerate(glyphOrder):
if glyphName in megaOrder:
n = megaOrder[glyphName]
while (glyphName + "." + repr(n)) in megaOrder:
n += 1
megaOrder[glyphName] = n
glyphName += "." + repr(n)
glyphOrder[i] = glyphName
megaOrder[glyphName] = 1
merger.glyphOrder = megaOrder = list(megaOrder.keys())
megaOrder = {}
for glyphOrder in glyphOrders:
for i, glyphName in enumerate(glyphOrder):
if glyphName in megaOrder:
n = megaOrder[glyphName]
while (glyphName + "." + repr(n)) in megaOrder:
n += 1
megaOrder[glyphName] = n
glyphName += "." + repr(n)
glyphOrder[i] = glyphName
megaOrder[glyphName] = 1
merger.glyphOrder = megaOrder = list(megaOrder.keys())
def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2,
advanceTolerance=.05,
advanceToleranceEmpty=.20):
pen1 = DecomposingRecordingPen(glyphSet1)
pen2 = DecomposingRecordingPen(glyphSet2)
g1 = glyphSet1[glyph1]
g2 = glyphSet2[glyph2]
g1.draw(pen1)
g2.draw(pen2)
if pen1.value != pen2.value:
return False
# Allow more width tolerance for glyphs with no ink
tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
def _glyphsAreSame(
glyphSet1,
glyphSet2,
glyph1,
glyph2,
advanceTolerance=0.05,
advanceToleranceEmpty=0.20,
):
pen1 = DecomposingRecordingPen(glyphSet1)
pen2 = DecomposingRecordingPen(glyphSet2)
g1 = glyphSet1[glyph1]
g2 = glyphSet2[glyph2]
g1.draw(pen1)
g2.draw(pen2)
if pen1.value != pen2.value:
return False
# Allow more width tolerance for glyphs with no ink
tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
# TODO Warn if advances not the same but within tolerance.
if abs(g1.width - g2.width) > g1.width * tolerance:
return False
if hasattr(g1, 'height') and g1.height is not None:
if abs(g1.height - g2.height) > g1.height * tolerance:
return False
return True
if abs(g1.width - g2.width) > g1.width * tolerance:
return False
if hasattr(g1, "height") and g1.height is not None:
if abs(g1.height - g2.height) > g1.height * tolerance:
return False
return True
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name
class _CmapUnicodePlatEncodings:
BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
def computeMegaCmap(merger, cmapTables):
"""Sets merger.cmap and merger.glyphOrder."""
"""Sets merger.cmap and merger.glyphOrder."""
# TODO Handle format=14.
# Only merge format 4 and 12 Unicode subtables, ignores all other subtables
# If there is a format 12 table for a font, ignore the format 4 table of it
chosenCmapTables = []
for fontIdx,table in enumerate(cmapTables):
format4 = None
format12 = None
for subtable in table.tables:
properties = (subtable.format, subtable.platformID, subtable.platEncID)
if properties in _CmapUnicodePlatEncodings.BMP:
format4 = subtable
elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
format12 = subtable
else:
log.warning(
"Dropped cmap subtable from font '%s':\t"
"format %2s, platformID %2s, platEncID %2s",
fontIdx, subtable.format, subtable.platformID, subtable.platEncID
)
if format12 is not None:
chosenCmapTables.append((format12, fontIdx))
elif format4 is not None:
chosenCmapTables.append((format4, fontIdx))
# TODO Handle format=14.
# Only merge format 4 and 12 Unicode subtables, ignores all other subtables
# If there is a format 12 table for a font, ignore the format 4 table of it
chosenCmapTables = []
for fontIdx, table in enumerate(cmapTables):
format4 = None
format12 = None
for subtable in table.tables:
properties = (subtable.format, subtable.platformID, subtable.platEncID)
if properties in _CmapUnicodePlatEncodings.BMP:
format4 = subtable
elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
format12 = subtable
else:
log.warning(
"Dropped cmap subtable from font '%s':\t"
"format %2s, platformID %2s, platEncID %2s",
fontIdx,
subtable.format,
subtable.platformID,
subtable.platEncID,
)
if format12 is not None:
chosenCmapTables.append((format12, fontIdx))
elif format4 is not None:
chosenCmapTables.append((format4, fontIdx))
# Build the unicode mapping
merger.cmap = cmap = {}
fontIndexForGlyph = {}
glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None
# Build the unicode mapping
merger.cmap = cmap = {}
fontIndexForGlyph = {}
glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None
for table,fontIdx in chosenCmapTables:
# handle duplicates
for uni,gid in table.cmap.items():
oldgid = cmap.get(uni, None)
if oldgid is None:
cmap[uni] = gid
fontIndexForGlyph[gid] = fontIdx
elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
continue
elif oldgid != gid:
# Char previously mapped to oldgid, now to gid.
# Record, to fix up in GSUB 'locl' later.
if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
if glyphSets is not None:
oldFontIdx = fontIndexForGlyph[oldgid]
for idx in (fontIdx, oldFontIdx):
if glyphSets[idx] is None:
glyphSets[idx] = merger.fonts[idx].getGlyphSet()
#if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
# continue
merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
# Char previously mapped to oldgid but oldgid is already remapped to a different
# gid, because of another Unicode character.
# TODO: Try harder to do something about these.
log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
for table, fontIdx in chosenCmapTables:
# handle duplicates
for uni, gid in table.cmap.items():
oldgid = cmap.get(uni, None)
if oldgid is None:
cmap[uni] = gid
fontIndexForGlyph[gid] = fontIdx
elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
continue
elif oldgid != gid:
# Char previously mapped to oldgid, now to gid.
# Record, to fix up in GSUB 'locl' later.
if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
if glyphSets is not None:
oldFontIdx = fontIndexForGlyph[oldgid]
for idx in (fontIdx, oldFontIdx):
if glyphSets[idx] is None:
glyphSets[idx] = merger.fonts[idx].getGlyphSet()
# if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
# continue
merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
# Char previously mapped to oldgid but oldgid is already remapped to a different
# gid, because of another Unicode character.
# TODO: Try harder to do something about these.
log.warning(
"Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
)
def renameCFFCharStrings(merger, glyphOrder, cffTable):
"""Rename topDictIndex charStrings based on glyphOrder."""
td = cffTable.cff.topDictIndex[0]
"""Rename topDictIndex charStrings based on glyphOrder."""
td = cffTable.cff.topDictIndex[0]
charStrings = {}
for i, v in enumerate(td.CharStrings.charStrings.values()):
glyphName = glyphOrder[i]
charStrings[glyphName] = v
td.CharStrings.charStrings = charStrings
charStrings = {}
for i, v in enumerate(td.CharStrings.charStrings.values()):
glyphName = glyphOrder[i]
charStrings[glyphName] = v
td.CharStrings.charStrings = charStrings
td.charset = list(glyphOrder)
td.charset = list(glyphOrder)

View File

@ -14,453 +14,516 @@ log = logging.getLogger("fontTools.merge")
def mergeLookupLists(lst):
# TODO Do smarter merge.
return sumLists(lst)
# TODO Do smarter merge.
return sumLists(lst)
def mergeFeatures(lst):
assert lst
self = otTables.Feature()
self.FeatureParams = None
self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
self.LookupCount = len(self.LookupListIndex)
return self
assert lst
self = otTables.Feature()
self.FeatureParams = None
self.LookupListIndex = mergeLookupLists(
[l.LookupListIndex for l in lst if l.LookupListIndex]
)
self.LookupCount = len(self.LookupListIndex)
return self
def mergeFeatureLists(lst):
d = {}
for l in lst:
for f in l:
tag = f.FeatureTag
if tag not in d:
d[tag] = []
d[tag].append(f.Feature)
ret = []
for tag in sorted(d.keys()):
rec = otTables.FeatureRecord()
rec.FeatureTag = tag
rec.Feature = mergeFeatures(d[tag])
ret.append(rec)
return ret
d = {}
for l in lst:
for f in l:
tag = f.FeatureTag
if tag not in d:
d[tag] = []
d[tag].append(f.Feature)
ret = []
for tag in sorted(d.keys()):
rec = otTables.FeatureRecord()
rec.FeatureTag = tag
rec.Feature = mergeFeatures(d[tag])
ret.append(rec)
return ret
def mergeLangSyses(lst):
assert lst
assert lst
# TODO Support merging ReqFeatureIndex
assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
# TODO Support merging ReqFeatureIndex
assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
self = otTables.LangSys()
self.LookupOrder = None
self.ReqFeatureIndex = 0xFFFF
self.FeatureIndex = mergeFeatureLists(
[l.FeatureIndex for l in lst if l.FeatureIndex]
)
self.FeatureCount = len(self.FeatureIndex)
return self
self = otTables.LangSys()
self.LookupOrder = None
self.ReqFeatureIndex = 0xFFFF
self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
self.FeatureCount = len(self.FeatureIndex)
return self
def mergeScripts(lst):
assert lst
assert lst
if len(lst) == 1:
return lst[0]
langSyses = {}
for sr in lst:
for lsr in sr.LangSysRecord:
if lsr.LangSysTag not in langSyses:
langSyses[lsr.LangSysTag] = []
langSyses[lsr.LangSysTag].append(lsr.LangSys)
lsrecords = []
for tag, langSys_list in sorted(langSyses.items()):
lsr = otTables.LangSysRecord()
lsr.LangSys = mergeLangSyses(langSys_list)
lsr.LangSysTag = tag
lsrecords.append(lsr)
if len(lst) == 1:
return lst[0]
langSyses = {}
for sr in lst:
for lsr in sr.LangSysRecord:
if lsr.LangSysTag not in langSyses:
langSyses[lsr.LangSysTag] = []
langSyses[lsr.LangSysTag].append(lsr.LangSys)
lsrecords = []
for tag, langSys_list in sorted(langSyses.items()):
lsr = otTables.LangSysRecord()
lsr.LangSys = mergeLangSyses(langSys_list)
lsr.LangSysTag = tag
lsrecords.append(lsr)
self = otTables.Script()
self.LangSysRecord = lsrecords
self.LangSysCount = len(lsrecords)
dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
if dfltLangSyses:
self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
else:
self.DefaultLangSys = None
return self
self = otTables.Script()
self.LangSysRecord = lsrecords
self.LangSysCount = len(lsrecords)
dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
if dfltLangSyses:
self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
else:
self.DefaultLangSys = None
return self
def mergeScriptRecords(lst):
d = {}
for l in lst:
for s in l:
tag = s.ScriptTag
if tag not in d:
d[tag] = []
d[tag].append(s.Script)
ret = []
for tag in sorted(d.keys()):
rec = otTables.ScriptRecord()
rec.ScriptTag = tag
rec.Script = mergeScripts(d[tag])
ret.append(rec)
return ret
d = {}
for l in lst:
for s in l:
tag = s.ScriptTag
if tag not in d:
d[tag] = []
d[tag].append(s.Script)
ret = []
for tag in sorted(d.keys()):
rec = otTables.ScriptRecord()
rec.ScriptTag = tag
rec.Script = mergeScripts(d[tag])
ret.append(rec)
return ret
otTables.ScriptList.mergeMap = {
'ScriptCount': lambda lst: None, # TODO
'ScriptRecord': mergeScriptRecords,
"ScriptCount": lambda lst: None, # TODO
"ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
'BaseScriptCount': lambda lst: None, # TODO
# TODO: Merge duplicate entries
'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
"BaseScriptCount": lambda lst: None, # TODO
# TODO: Merge duplicate entries
"BaseScriptRecord": lambda lst: sorted(
sumLists(lst), key=lambda s: s.BaseScriptTag
),
}
otTables.FeatureList.mergeMap = {
'FeatureCount': sum,
'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
"FeatureCount": sum,
"FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}
otTables.LookupList.mergeMap = {
'LookupCount': sum,
'Lookup': sumLists,
"LookupCount": sum,
"Lookup": sumLists,
}
otTables.Coverage.mergeMap = {
'Format': min,
'glyphs': sumLists,
"Format": min,
"glyphs": sumLists,
}
otTables.ClassDef.mergeMap = {
'Format': min,
'classDefs': sumDicts,
"Format": min,
"classDefs": sumDicts,
}
otTables.LigCaretList.mergeMap = {
'Coverage': mergeObjects,
'LigGlyphCount': sum,
'LigGlyph': sumLists,
"Coverage": mergeObjects,
"LigGlyphCount": sum,
"LigGlyph": sumLists,
}
otTables.AttachList.mergeMap = {
'Coverage': mergeObjects,
'GlyphCount': sum,
'AttachPoint': sumLists,
"Coverage": mergeObjects,
"GlyphCount": sum,
"AttachPoint": sumLists,
}
# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
'MarkSetTableFormat': equal,
'MarkSetCount': sum,
'Coverage': sumLists,
"MarkSetTableFormat": equal,
"MarkSetCount": sum,
"Coverage": sumLists,
}
otTables.Axis.mergeMap = {
'*': mergeObjects,
"*": mergeObjects,
}
# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
'BaseTagCount': sum,
'BaselineTag': sumLists,
"BaseTagCount": sum,
"BaselineTag": sumLists,
}
otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
'*': mergeObjects,
'Version': max,
otTables.GDEF.mergeMap = (
otTables.GSUB.mergeMap
) = (
otTables.GPOS.mergeMap
) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
"*": mergeObjects,
"Version": max,
}
ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
'tableTag': onlyExisting(equal), # XXX clean me up
'table': mergeObjects,
ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
"GSUB"
).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
"BASE"
).mergeMap = ttLib.getTableClass(
"JSTF"
).mergeMap = ttLib.getTableClass(
"MATH"
).mergeMap = {
"tableTag": onlyExisting(equal), # XXX clean me up
"table": mergeObjects,
}
@add_method(ttLib.getTableClass('GSUB'))
@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables):
assert len(tables) == len(m.duplicateGlyphsPerFont)
for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
if not dups: continue
if table is None or table is NotImplemented:
log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
continue
assert len(tables) == len(m.duplicateGlyphsPerFont)
for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
if not dups:
continue
if table is None or table is NotImplemented:
log.warning(
"Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
m.fonts[i]._merger__name,
dups,
)
continue
synthFeature = None
synthLookup = None
for script in table.table.ScriptList.ScriptRecord:
if script.ScriptTag == 'DFLT': continue # XXX
for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
if langsys is None: continue # XXX Create!
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
assert len(feature) <= 1
if feature:
feature = feature[0]
else:
if not synthFeature:
synthFeature = otTables.FeatureRecord()
synthFeature.FeatureTag = 'locl'
f = synthFeature.Feature = otTables.Feature()
f.FeatureParams = None
f.LookupCount = 0
f.LookupListIndex = []
table.table.FeatureList.FeatureRecord.append(synthFeature)
table.table.FeatureList.FeatureCount += 1
feature = synthFeature
langsys.FeatureIndex.append(feature)
langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
synthFeature = None
synthLookup = None
for script in table.table.ScriptList.ScriptRecord:
if script.ScriptTag == "DFLT":
continue # XXX
for langsys in [script.Script.DefaultLangSys] + [
l.LangSys for l in script.Script.LangSysRecord
]:
if langsys is None:
continue # XXX Create!
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
assert len(feature) <= 1
if feature:
feature = feature[0]
else:
if not synthFeature:
synthFeature = otTables.FeatureRecord()
synthFeature.FeatureTag = "locl"
f = synthFeature.Feature = otTables.Feature()
f.FeatureParams = None
f.LookupCount = 0
f.LookupListIndex = []
table.table.FeatureList.FeatureRecord.append(synthFeature)
table.table.FeatureList.FeatureCount += 1
feature = synthFeature
langsys.FeatureIndex.append(feature)
langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
if not synthLookup:
subtable = otTables.SingleSubst()
subtable.mapping = dups
synthLookup = otTables.Lookup()
synthLookup.LookupFlag = 0
synthLookup.LookupType = 1
synthLookup.SubTableCount = 1
synthLookup.SubTable = [subtable]
if table.table.LookupList is None:
# mtiLib uses None as default value for LookupList,
# while feaLib points to an empty array with count 0
# TODO: make them do the same
table.table.LookupList = otTables.LookupList()
table.table.LookupList.Lookup = []
table.table.LookupList.LookupCount = 0
table.table.LookupList.Lookup.append(synthLookup)
table.table.LookupList.LookupCount += 1
if not synthLookup:
subtable = otTables.SingleSubst()
subtable.mapping = dups
synthLookup = otTables.Lookup()
synthLookup.LookupFlag = 0
synthLookup.LookupType = 1
synthLookup.SubTableCount = 1
synthLookup.SubTable = [subtable]
if table.table.LookupList is None:
# mtiLib uses None as default value for LookupList,
# while feaLib points to an empty array with count 0
# TODO: make them do the same
table.table.LookupList = otTables.LookupList()
table.table.LookupList.Lookup = []
table.table.LookupList.LookupCount = 0
table.table.LookupList.Lookup.append(synthLookup)
table.table.LookupList.LookupCount += 1
if feature.Feature.LookupListIndex[:1] != [synthLookup]:
feature.Feature.LookupListIndex[:0] = [synthLookup]
feature.Feature.LookupCount += 1
if feature.Feature.LookupListIndex[:1] != [synthLookup]:
feature.Feature.LookupListIndex[:0] = [synthLookup]
feature.Feature.LookupCount += 1
DefaultTable.merge(self, m, tables)
return self
DefaultTable.merge(self, m, tables)
return self
@add_method(otTables.SingleSubst,
otTables.MultipleSubst,
otTables.AlternateSubst,
otTables.LigatureSubst,
otTables.ReverseChainSingleSubst,
otTables.SinglePos,
otTables.PairPos,
otTables.CursivePos,
otTables.MarkBasePos,
otTables.MarkLigPos,
otTables.MarkMarkPos)
@add_method(
otTables.SingleSubst,
otTables.MultipleSubst,
otTables.AlternateSubst,
otTables.LigatureSubst,
otTables.ReverseChainSingleSubst,
otTables.SinglePos,
otTables.PairPos,
otTables.CursivePos,
otTables.MarkBasePos,
otTables.MarkLigPos,
otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap):
pass
pass
# Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos)
@add_method(
otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos,
)
def __merge_classify_context(self):
class ContextHelper(object):
def __init__(self, klass, Format):
if klass.__name__.endswith("Subst"):
Typ = "Sub"
Type = "Subst"
else:
Typ = "Pos"
Type = "Pos"
if klass.__name__.startswith("Chain"):
Chain = "Chain"
else:
Chain = ""
ChainTyp = Chain + Typ
class ContextHelper(object):
def __init__(self, klass, Format):
if klass.__name__.endswith('Subst'):
Typ = 'Sub'
Type = 'Subst'
else:
Typ = 'Pos'
Type = 'Pos'
if klass.__name__.startswith('Chain'):
Chain = 'Chain'
else:
Chain = ''
ChainTyp = Chain+Typ
self.Typ = Typ
self.Type = Type
self.Chain = Chain
self.ChainTyp = ChainTyp
self.Typ = Typ
self.Type = Type
self.Chain = Chain
self.ChainTyp = ChainTyp
self.LookupRecord = Type + "LookupRecord"
self.LookupRecord = Type+'LookupRecord'
if Format == 1:
self.Rule = ChainTyp + "Rule"
self.RuleSet = ChainTyp + "RuleSet"
elif Format == 2:
self.Rule = ChainTyp + "ClassRule"
self.RuleSet = ChainTyp + "ClassSet"
if Format == 1:
self.Rule = ChainTyp+'Rule'
self.RuleSet = ChainTyp+'RuleSet'
elif Format == 2:
self.Rule = ChainTyp+'ClassRule'
self.RuleSet = ChainTyp+'ClassSet'
if self.Format not in [1, 2, 3]:
return None # Don't shoot the messenger; let it go
if not hasattr(self.__class__, "_merge__ContextHelpers"):
self.__class__._merge__ContextHelpers = {}
if self.Format not in self.__class__._merge__ContextHelpers:
helper = ContextHelper(self.__class__, self.Format)
self.__class__._merge__ContextHelpers[self.Format] = helper
return self.__class__._merge__ContextHelpers[self.Format]
if self.Format not in [1, 2, 3]:
return None # Don't shoot the messenger; let it go
if not hasattr(self.__class__, "_merge__ContextHelpers"):
self.__class__._merge__ContextHelpers = {}
if self.Format not in self.__class__._merge__ContextHelpers:
helper = ContextHelper(self.__class__, self.Format)
self.__class__._merge__ContextHelpers[self.Format] = helper
return self.__class__._merge__ContextHelpers[self.Format]
@add_method(otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos)
@add_method(
otTables.ContextSubst,
otTables.ChainContextSubst,
otTables.ContextPos,
otTables.ChainContextPos,
)
def mapLookups(self, lookupMap):
c = self.__merge_classify_context()
c = self.__merge_classify_context()
if self.Format in [1, 2]:
for rs in getattr(self, c.RuleSet):
if not rs: continue
for r in getattr(rs, c.Rule):
if not r: continue
for ll in getattr(r, c.LookupRecord):
if not ll: continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
elif self.Format == 3:
for ll in getattr(self, c.LookupRecord):
if not ll: continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
else:
assert 0, "unknown format: %s" % self.Format
if self.Format in [1, 2]:
for rs in getattr(self, c.RuleSet):
if not rs:
continue
for r in getattr(rs, c.Rule):
if not r:
continue
for ll in getattr(r, c.LookupRecord):
if not ll:
continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
elif self.Format == 3:
for ll in getattr(self, c.LookupRecord):
if not ll:
continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
else:
assert 0, "unknown format: %s" % self.Format
@add_method(otTables.ExtensionSubst,
otTables.ExtensionPos)
@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
if self.Format == 1:
self.ExtSubTable.mapLookups(lookupMap)
else:
assert 0, "unknown format: %s" % self.Format
if self.Format == 1:
self.ExtSubTable.mapLookups(lookupMap)
else:
assert 0, "unknown format: %s" % self.Format
@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
for st in self.SubTable:
if not st: continue
st.mapLookups(lookupMap)
for st in self.SubTable:
if not st:
continue
st.mapLookups(lookupMap)
@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
for l in self.Lookup:
if not l: continue
l.mapLookups(lookupMap)
for l in self.Lookup:
if not l:
continue
l.mapLookups(lookupMap)
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
if self.LookupFlag & 0x0010:
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
if self.LookupFlag & 0x0010:
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
for l in self.Lookup:
if not l: continue
l.mapMarkFilteringSets(markFilteringSetMap)
for l in self.Lookup:
if not l:
continue
l.mapMarkFilteringSets(markFilteringSetMap)
@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
for f in self.FeatureRecord:
if not f or not f.Feature: continue
f.Feature.mapLookups(lookupMap)
for f in self.FeatureRecord:
if not f or not f.Feature:
continue
f.Feature.mapLookups(lookupMap)
@add_method(otTables.DefaultLangSys,
otTables.LangSys)
@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
if self.ReqFeatureIndex != 65535:
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
if self.ReqFeatureIndex != 65535:
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
@add_method(otTables.Script)
def mapFeatures(self, featureMap):
if self.DefaultLangSys:
self.DefaultLangSys.mapFeatures(featureMap)
for l in self.LangSysRecord:
if not l or not l.LangSys: continue
l.LangSys.mapFeatures(featureMap)
if self.DefaultLangSys:
self.DefaultLangSys.mapFeatures(featureMap)
for l in self.LangSysRecord:
if not l or not l.LangSys:
continue
l.LangSys.mapFeatures(featureMap)
@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
for s in self.ScriptRecord:
if not s or not s.Script: continue
s.Script.mapFeatures(featureMap)
for s in self.ScriptRecord:
if not s or not s.Script:
continue
s.Script.mapFeatures(featureMap)
def layoutPreMerge(font):
# Map indices to references
# Map indices to references
GDEF = font.get('GDEF')
GSUB = font.get('GSUB')
GPOS = font.get('GPOS')
GDEF = font.get("GDEF")
GSUB = font.get("GSUB")
GPOS = font.get("GPOS")
for t in [GSUB, GPOS]:
if not t: continue
for t in [GSUB, GPOS]:
if not t:
continue
if t.table.LookupList:
lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
t.table.LookupList.mapLookups(lookupMap)
t.table.FeatureList.mapLookups(lookupMap)
if t.table.LookupList:
lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
t.table.LookupList.mapLookups(lookupMap)
t.table.FeatureList.mapLookups(lookupMap)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = {
i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
}
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
if t.table.FeatureList and t.table.ScriptList:
featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
t.table.ScriptList.mapFeatures(featureMap)
if t.table.FeatureList and t.table.ScriptList:
featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
t.table.ScriptList.mapFeatures(featureMap)
# TODO FeatureParams nameIDs
# TODO FeatureParams nameIDs
def layoutPostMerge(font):
# Map references back to indices
# Map references back to indices
GDEF = font.get('GDEF')
GSUB = font.get('GSUB')
GPOS = font.get('GPOS')
GDEF = font.get("GDEF")
GSUB = font.get("GSUB")
GPOS = font.get("GPOS")
for t in [GSUB, GPOS]:
if not t: continue
for t in [GSUB, GPOS]:
if not t:
continue
if t.table.FeatureList and t.table.ScriptList:
if t.table.FeatureList and t.table.ScriptList:
# Collect unregistered (new) features.
featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap)
# Collect unregistered (new) features.
featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap)
# Record used features.
featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap)
usedIndices = featureMap.s
# Record used features.
featureMap = AttendanceRecordingIdentityDict(
t.table.FeatureList.FeatureRecord
)
t.table.ScriptList.mapFeatures(featureMap)
usedIndices = featureMap.s
# Remove unused features
t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
# Remove unused features
t.table.FeatureList.FeatureRecord = [
f
for i, f in enumerate(t.table.FeatureList.FeatureRecord)
if i in usedIndices
]
# Map back to indices.
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap)
# Map back to indices.
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap)
t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
if t.table.LookupList:
if t.table.LookupList:
# Collect unregistered (new) lookups.
lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
# Collect unregistered (new) lookups.
lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
# Record used lookups.
lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
usedIndices = lookupMap.s
# Record used lookups.
lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
usedIndices = lookupMap.s
# Remove unused lookups
t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
# Remove unused lookups
t.table.LookupList.Lookup = [
l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
]
# Map back to indices.
lookupMap = NonhashableDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
# Map back to indices.
lookupMap = NonhashableDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap)
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = NonhashableDict(
GDEF.table.MarkGlyphSetsDef.Coverage
)
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
# TODO FeatureParams nameIDs
# TODO FeatureParams nameIDs

View File

@ -4,82 +4,80 @@
class Options(object):
class UnknownOptionError(Exception):
pass
class UnknownOptionError(Exception):
pass
def __init__(self, **kwargs):
def __init__(self, **kwargs):
self.verbose = False
self.timing = False
self.drop_tables = []
self.verbose = False
self.timing = False
self.drop_tables = []
self.set(**kwargs)
self.set(**kwargs)
def set(self, **kwargs):
for k, v in kwargs.items():
if not hasattr(self, k):
raise self.UnknownOptionError("Unknown option '%s'" % k)
setattr(self, k, v)
def set(self, **kwargs):
for k,v in kwargs.items():
if not hasattr(self, k):
raise self.UnknownOptionError("Unknown option '%s'" % k)
setattr(self, k, v)
def parse_opts(self, argv, ignore_unknown=[]):
ret = []
opts = {}
for a in argv:
orig_a = a
if not a.startswith("--"):
ret.append(a)
continue
a = a[2:]
i = a.find("=")
op = "="
if i == -1:
if a.startswith("no-"):
k = a[3:]
v = False
else:
k = a
v = True
else:
k = a[:i]
if k[-1] in "-+":
op = k[-1] + "=" # Ops is '-=' or '+=' now.
k = k[:-1]
v = a[i + 1 :]
ok = k
k = k.replace("-", "_")
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
continue
else:
raise self.UnknownOptionError("Unknown option '%s'" % a)
def parse_opts(self, argv, ignore_unknown=[]):
ret = []
opts = {}
for a in argv:
orig_a = a
if not a.startswith('--'):
ret.append(a)
continue
a = a[2:]
i = a.find('=')
op = '='
if i == -1:
if a.startswith("no-"):
k = a[3:]
v = False
else:
k = a
v = True
else:
k = a[:i]
if k[-1] in "-+":
op = k[-1]+'=' # Ops is '-=' or '+=' now.
k = k[:-1]
v = a[i+1:]
ok = k
k = k.replace('-', '_')
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
continue
else:
raise self.UnknownOptionError("Unknown option '%s'" % a)
ov = getattr(self, k)
if isinstance(ov, bool):
v = bool(v)
elif isinstance(ov, int):
v = int(v)
elif isinstance(ov, list):
vv = v.split(",")
if vv == [""]:
vv = []
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
if op == "=":
v = vv
elif op == "+=":
v = ov
v.extend(vv)
elif op == "-=":
v = ov
for x in vv:
if x in v:
v.remove(x)
else:
assert 0
ov = getattr(self, k)
if isinstance(ov, bool):
v = bool(v)
elif isinstance(ov, int):
v = int(v)
elif isinstance(ov, list):
vv = v.split(',')
if vv == ['']:
vv = []
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
if op == '=':
v = vv
elif op == '+=':
v = ov
v.extend(vv)
elif op == '-=':
v = ov
for x in vv:
if x in v:
v.remove(x)
else:
assert 0
opts[k] = v
self.set(**opts)
return ret
opts[k] = v
self.set(**opts)
return ret

View File

@ -13,299 +13,306 @@ import logging
log = logging.getLogger("fontTools.merge")
ttLib.getTableClass('maxp').mergeMap = {
'*': max,
'tableTag': equal,
'tableVersion': equal,
'numGlyphs': sum,
'maxStorage': first,
'maxFunctionDefs': first,
'maxInstructionDefs': first,
# TODO When we correctly merge hinting data, update these values:
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
ttLib.getTableClass("maxp").mergeMap = {
"*": max,
"tableTag": equal,
"tableVersion": equal,
"numGlyphs": sum,
"maxStorage": first,
"maxFunctionDefs": first,
"maxInstructionDefs": first,
# TODO When we correctly merge hinting data, update these values:
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}
headFlagsMergeBitMap = {
'size': 16,
'*': bitwise_or,
1: bitwise_and, # Baseline at y = 0
2: bitwise_and, # lsb at x = 0
3: bitwise_and, # Force ppem to integer values. FIXME?
5: bitwise_and, # Font is vertical
6: lambda bit: 0, # Always set to zero
11: bitwise_and, # Font data is 'lossless'
13: bitwise_and, # Optimized for ClearType
14: bitwise_and, # Last resort font. FIXME? equal or first may be better
15: lambda bit: 0, # Always set to zero
"size": 16,
"*": bitwise_or,
1: bitwise_and, # Baseline at y = 0
2: bitwise_and, # lsb at x = 0
3: bitwise_and, # Force ppem to integer values. FIXME?
5: bitwise_and, # Font is vertical
6: lambda bit: 0, # Always set to zero
11: bitwise_and, # Font data is 'lossless'
13: bitwise_and, # Optimized for ClearType
14: bitwise_and, # Last resort font. FIXME? equal or first may be better
15: lambda bit: 0, # Always set to zero
}
ttLib.getTableClass('head').mergeMap = {
'tableTag': equal,
'tableVersion': max,
'fontRevision': max,
'checkSumAdjustment': lambda lst: 0, # We need *something* here
'magicNumber': equal,
'flags': mergeBits(headFlagsMergeBitMap),
'unitsPerEm': equal,
'created': current_time,
'modified': current_time,
'xMin': min,
'yMin': min,
'xMax': max,
'yMax': max,
'macStyle': first,
'lowestRecPPEM': max,
'fontDirectionHint': lambda lst: 2,
'indexToLocFormat': first,
'glyphDataFormat': equal,
ttLib.getTableClass("head").mergeMap = {
"tableTag": equal,
"tableVersion": max,
"fontRevision": max,
"checkSumAdjustment": lambda lst: 0, # We need *something* here
"magicNumber": equal,
"flags": mergeBits(headFlagsMergeBitMap),
"unitsPerEm": equal,
"created": current_time,
"modified": current_time,
"xMin": min,
"yMin": min,
"xMax": max,
"yMax": max,
"macStyle": first,
"lowestRecPPEM": max,
"fontDirectionHint": lambda lst: 2,
"indexToLocFormat": first,
"glyphDataFormat": equal,
}
ttLib.getTableClass('hhea').mergeMap = {
'*': equal,
'tableTag': equal,
'tableVersion': max,
'ascent': max,
'descent': min,
'lineGap': max,
'advanceWidthMax': max,
'minLeftSideBearing': min,
'minRightSideBearing': min,
'xMaxExtent': max,
'caretSlopeRise': first,
'caretSlopeRun': first,
'caretOffset': first,
'numberOfHMetrics': recalculate,
ttLib.getTableClass("hhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceWidthMax": max,
"minLeftSideBearing": min,
"minRightSideBearing": min,
"xMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfHMetrics": recalculate,
}
ttLib.getTableClass('vhea').mergeMap = {
'*': equal,
'tableTag': equal,
'tableVersion': max,
'ascent': max,
'descent': min,
'lineGap': max,
'advanceHeightMax': max,
'minTopSideBearing': min,
'minBottomSideBearing': min,
'yMaxExtent': max,
'caretSlopeRise': first,
'caretSlopeRun': first,
'caretOffset': first,
'numberOfVMetrics': recalculate,
ttLib.getTableClass("vhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceHeightMax": max,
"minTopSideBearing": min,
"minBottomSideBearing": min,
"yMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfVMetrics": recalculate,
}
os2FsTypeMergeBitMap = {
'size': 16,
'*': lambda bit: 0,
1: bitwise_or, # no embedding permitted
2: bitwise_and, # allow previewing and printing documents
3: bitwise_and, # allow editing documents
8: bitwise_or, # no subsetting permitted
9: bitwise_or, # no embedding of outlines permitted
"size": 16,
"*": lambda bit: 0,
1: bitwise_or, # no embedding permitted
2: bitwise_and, # allow previewing and printing documents
3: bitwise_and, # allow editing documents
8: bitwise_or, # no subsetting permitted
9: bitwise_or, # no embedding of outlines permitted
}
def mergeOs2FsType(lst):
lst = list(lst)
if all(item == 0 for item in lst):
return 0
lst = list(lst)
if all(item == 0 for item in lst):
return 0
# Compute least restrictive logic for each fsType value
for i in range(len(lst)):
# unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
if lst[i] & 0x000C:
lst[i] &= ~0x0002
# set bit 2 (allow previewing) if bit 3 is set (allow editing)
elif lst[i] & 0x0008:
lst[i] |= 0x0004
# set bits 2 and 3 if everything is allowed
elif lst[i] == 0:
lst[i] = 0x000C
# Compute least restrictive logic for each fsType value
for i in range(len(lst)):
# unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
if lst[i] & 0x000C:
lst[i] &= ~0x0002
# set bit 2 (allow previewing) if bit 3 is set (allow editing)
elif lst[i] & 0x0008:
lst[i] |= 0x0004
# set bits 2 and 3 if everything is allowed
elif lst[i] == 0:
lst[i] = 0x000C
fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
# unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
if fsType & 0x0002:
fsType &= ~0x000C
return fsType
fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
# unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
if fsType & 0x0002:
fsType &= ~0x000C
return fsType
ttLib.getTableClass('OS/2').mergeMap = {
'*': first,
'tableTag': equal,
'version': max,
'xAvgCharWidth': first, # Will be recalculated at the end on the merged font
'fsType': mergeOs2FsType, # Will be overwritten
'panose': first, # FIXME: should really be the first Latin font
'ulUnicodeRange1': bitwise_or,
'ulUnicodeRange2': bitwise_or,
'ulUnicodeRange3': bitwise_or,
'ulUnicodeRange4': bitwise_or,
'fsFirstCharIndex': min,
'fsLastCharIndex': max,
'sTypoAscender': max,
'sTypoDescender': min,
'sTypoLineGap': max,
'usWinAscent': max,
'usWinDescent': max,
# Version 1
'ulCodePageRange1': onlyExisting(bitwise_or),
'ulCodePageRange2': onlyExisting(bitwise_or),
# Version 2, 3, 4
'sxHeight': onlyExisting(max),
'sCapHeight': onlyExisting(max),
'usDefaultChar': onlyExisting(first),
'usBreakChar': onlyExisting(first),
'usMaxContext': onlyExisting(max),
# version 5
'usLowerOpticalPointSize': onlyExisting(min),
'usUpperOpticalPointSize': onlyExisting(max),
ttLib.getTableClass("OS/2").mergeMap = {
"*": first,
"tableTag": equal,
"version": max,
"xAvgCharWidth": first, # Will be recalculated at the end on the merged font
"fsType": mergeOs2FsType, # Will be overwritten
"panose": first, # FIXME: should really be the first Latin font
"ulUnicodeRange1": bitwise_or,
"ulUnicodeRange2": bitwise_or,
"ulUnicodeRange3": bitwise_or,
"ulUnicodeRange4": bitwise_or,
"fsFirstCharIndex": min,
"fsLastCharIndex": max,
"sTypoAscender": max,
"sTypoDescender": min,
"sTypoLineGap": max,
"usWinAscent": max,
"usWinDescent": max,
# Version 1
"ulCodePageRange1": onlyExisting(bitwise_or),
"ulCodePageRange2": onlyExisting(bitwise_or),
# Version 2, 3, 4
"sxHeight": onlyExisting(max),
"sCapHeight": onlyExisting(max),
"usDefaultChar": onlyExisting(first),
"usBreakChar": onlyExisting(first),
"usMaxContext": onlyExisting(max),
# version 5
"usLowerOpticalPointSize": onlyExisting(min),
"usUpperOpticalPointSize": onlyExisting(max),
}
@add_method(ttLib.getTableClass('OS/2'))
@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
DefaultTable.merge(self, m, tables)
if self.version < 2:
# bits 8 and 9 are reserved and should be set to zero
self.fsType &= ~0x0300
if self.version >= 3:
# Only one of bits 1, 2, and 3 may be set. We already take
# care of bit 1 implications in mergeOs2FsType. So unset
# bit 2 if bit 3 is already set.
if self.fsType & 0x0008:
self.fsType &= ~0x0004
return self
DefaultTable.merge(self, m, tables)
if self.version < 2:
# bits 8 and 9 are reserved and should be set to zero
self.fsType &= ~0x0300
if self.version >= 3:
# Only one of bits 1, 2, and 3 may be set. We already take
# care of bit 1 implications in mergeOs2FsType. So unset
# bit 2 if bit 3 is already set.
if self.fsType & 0x0008:
self.fsType &= ~0x0004
return self
ttLib.getTableClass('post').mergeMap = {
'*': first,
'tableTag': equal,
'formatType': max,
'isFixedPitch': min,
'minMemType42': max,
'maxMemType42': lambda lst: 0,
'minMemType1': max,
'maxMemType1': lambda lst: 0,
'mapping': onlyExisting(sumDicts),
'extraNames': lambda lst: [],
ttLib.getTableClass("post").mergeMap = {
"*": first,
"tableTag": equal,
"formatType": max,
"isFixedPitch": min,
"minMemType42": max,
"maxMemType42": lambda lst: 0,
"minMemType1": max,
"maxMemType1": lambda lst: 0,
"mapping": onlyExisting(sumDicts),
"extraNames": lambda lst: [],
}
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
'tableTag': equal,
'metrics': sumDicts,
ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
"tableTag": equal,
"metrics": sumDicts,
}
ttLib.getTableClass('name').mergeMap = {
'tableTag': equal,
'names': first, # FIXME? Does mixing name records make sense?
ttLib.getTableClass("name").mergeMap = {
"tableTag": equal,
"names": first, # FIXME? Does mixing name records make sense?
}
ttLib.getTableClass('loca').mergeMap = {
'*': recalculate,
'tableTag': equal,
ttLib.getTableClass("loca").mergeMap = {
"*": recalculate,
"tableTag": equal,
}
ttLib.getTableClass('glyf').mergeMap = {
'tableTag': equal,
'glyphs': sumDicts,
'glyphOrder': sumLists,
ttLib.getTableClass("glyf").mergeMap = {
"tableTag": equal,
"glyphs": sumDicts,
"glyphOrder": sumLists,
}
@add_method(ttLib.getTableClass('glyf'))
@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
for i,table in enumerate(tables):
for g in table.glyphs.values():
if i:
# Drop hints for all but first font, since
# we don't map functions / CVT values.
g.removeHinting()
# Expand composite glyphs to load their
# composite glyph names.
if g.isComposite():
g.expand(table)
return DefaultTable.merge(self, m, tables)
for i, table in enumerate(tables):
for g in table.glyphs.values():
if i:
# Drop hints for all but first font, since
# we don't map functions / CVT values.
g.removeHinting()
# Expand composite glyphs to load their
# composite glyph names.
if g.isComposite():
g.expand(table)
return DefaultTable.merge(self, m, tables)
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass('CFF '))
ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
lst
) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
if any(hasattr(table, "FDSelect") for table in tables):
raise NotImplementedError(
"Merging CID-keyed CFF tables is not supported yet"
)
if any(hasattr(table, "FDSelect") for table in tables):
raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")
for table in tables:
table.cff.desubroutinize()
for table in tables:
table.cff.desubroutinize()
newcff = tables[0]
newfont = newcff.cff[0]
private = newfont.Private
storedNamesStrings = []
glyphOrderStrings = []
glyphOrder = set(newfont.getGlyphOrder())
newcff = tables[0]
newfont = newcff.cff[0]
private = newfont.Private
storedNamesStrings = []
glyphOrderStrings = []
glyphOrder = set(newfont.getGlyphOrder())
for name in newfont.strings.strings:
if name not in glyphOrder:
storedNamesStrings.append(name)
else:
glyphOrderStrings.append(name)
for name in newfont.strings.strings:
if name not in glyphOrder:
storedNamesStrings.append(name)
else:
glyphOrderStrings.append(name)
chrset = list(newfont.charset)
newcs = newfont.CharStrings
log.debug("FONT 0 CharStrings: %d.", len(newcs))
chrset = list(newfont.charset)
newcs = newfont.CharStrings
log.debug("FONT 0 CharStrings: %d.", len(newcs))
for i, table in enumerate(tables[1:], start=1):
font = table.cff[0]
font.Private = private
fontGlyphOrder = set(font.getGlyphOrder())
for name in font.strings.strings:
if name in fontGlyphOrder:
glyphOrderStrings.append(name)
cs = font.CharStrings
gs = table.cff.GlobalSubrs
log.debug("Font %d CharStrings: %d.", i, len(cs))
chrset.extend(font.charset)
if newcs.charStringsAreIndexed:
for i, name in enumerate(cs.charStrings, start=len(newcs)):
newcs.charStrings[name] = i
newcs.charStringsIndex.items.append(None)
for name in cs.charStrings:
newcs[name] = cs[name]
for i, table in enumerate(tables[1:], start=1):
font = table.cff[0]
font.Private = private
fontGlyphOrder = set(font.getGlyphOrder())
for name in font.strings.strings:
if name in fontGlyphOrder:
glyphOrderStrings.append(name)
cs = font.CharStrings
gs = table.cff.GlobalSubrs
log.debug("Font %d CharStrings: %d.", i, len(cs))
chrset.extend(font.charset)
if newcs.charStringsAreIndexed:
for i, name in enumerate(cs.charStrings, start=len(newcs)):
newcs.charStrings[name] = i
newcs.charStringsIndex.items.append(None)
for name in cs.charStrings:
newcs[name] = cs[name]
newfont.charset = chrset
newfont.numGlyphs = len(chrset)
newfont.strings.strings = glyphOrderStrings + storedNamesStrings
newfont.charset = chrset
newfont.numGlyphs = len(chrset)
newfont.strings.strings = glyphOrderStrings + storedNamesStrings
return newcff
return newcff
@add_method(ttLib.getTableClass('cmap'))
@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
# TODO Handle format=14.
if not hasattr(m, 'cmap'):
computeMegaCmap(m, tables)
cmap = m.cmap
# TODO Handle format=14.
if not hasattr(m, "cmap"):
computeMegaCmap(m, tables)
cmap = m.cmap
cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
self.tables = []
module = ttLib.getTableModule('cmap')
if len(cmapBmpOnly) != len(cmap):
# format-12 required.
cmapTable = module.cmap_classes[12](12)
cmapTable.platformID = 3
cmapTable.platEncID = 10
cmapTable.language = 0
cmapTable.cmap = cmap
self.tables.append(cmapTable)
# always create format-4
cmapTable = module.cmap_classes[4](4)
cmapTable.platformID = 3
cmapTable.platEncID = 1
cmapTable.language = 0
cmapTable.cmap = cmapBmpOnly
# ordered by platform then encoding
self.tables.insert(0, cmapTable)
self.tableVersion = 0
self.numSubTables = len(self.tables)
return self
cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
self.tables = []
module = ttLib.getTableModule("cmap")
if len(cmapBmpOnly) != len(cmap):
# format-12 required.
cmapTable = module.cmap_classes[12](12)
cmapTable.platformID = 3
cmapTable.platEncID = 10
cmapTable.language = 0
cmapTable.cmap = cmap
self.tables.append(cmapTable)
# always create format-4
cmapTable = module.cmap_classes[4](4)
cmapTable.platformID = 3
cmapTable.platEncID = 1
cmapTable.language = 0
cmapTable.cmap = cmapBmpOnly
# ordered by platform then encoding
self.tables.insert(0, cmapTable)
self.tableVersion = 0
self.numSubTables = len(self.tables)
return self

View File

@ -1,65 +1,78 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
def is_Default_Ignorable(u):
# http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
#
# TODO Move me to unicodedata module and autogenerate.
#
# Unicode 14.0:
# $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
# 00AD # Cf SOFT HYPHEN
# 034F # Mn COMBINING GRAPHEME JOINER
# 061C # Cf ARABIC LETTER MARK
# 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
# 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
# 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
# 180E # Cf MONGOLIAN VOWEL SEPARATOR
# 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
# 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
# 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
# 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
# 2065 # Cn <reserved-2065>
# 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
# 3164 # Lo HANGUL FILLER
# FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
# FEFF # Cf ZERO WIDTH NO-BREAK SPACE
# FFA0 # Lo HALFWIDTH HANGUL FILLER
# FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
# 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
# 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
# E0000 # Cn <reserved-E0000>
# E0001 # Cf LANGUAGE TAG
# E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
# E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
# E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
# E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
# E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
return (
u == 0x00AD or # Cf SOFT HYPHEN
u == 0x034F or # Mn COMBINING GRAPHEME JOINER
u == 0x061C or # Cf ARABIC LETTER MARK
0x115F <= u <= 0x1160 or # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
0x17B4 <= u <= 0x17B5 or # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
0x180B <= u <= 0x180D or # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
u == 0x180E or # Cf MONGOLIAN VOWEL SEPARATOR
u == 0x180F or # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
0x200B <= u <= 0x200F or # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
0x202A <= u <= 0x202E or # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
0x2060 <= u <= 0x2064 or # Cf [5] WORD JOINER..INVISIBLE PLUS
u == 0x2065 or # Cn <reserved-2065>
0x2066 <= u <= 0x206F or # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
u == 0x3164 or # Lo HANGUL FILLER
0xFE00 <= u <= 0xFE0F or # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
u == 0xFEFF or # Cf ZERO WIDTH NO-BREAK SPACE
u == 0xFFA0 or # Lo HALFWIDTH HANGUL FILLER
0xFFF0 <= u <= 0xFFF8 or # Cn [9] <reserved-FFF0>..<reserved-FFF8>
0x1BCA0 <= u <= 0x1BCA3 or # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
0x1D173 <= u <= 0x1D17A or # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
u == 0xE0000 or # Cn <reserved-E0000>
u == 0xE0001 or # Cf LANGUAGE TAG
0xE0002 <= u <= 0xE001F or # Cn [30] <reserved-E0002>..<reserved-E001F>
0xE0020 <= u <= 0xE007F or # Cf [96] TAG SPACE..CANCEL TAG
0xE0080 <= u <= 0xE00FF or # Cn [128] <reserved-E0080>..<reserved-E00FF>
0xE0100 <= u <= 0xE01EF or # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
0xE01F0 <= u <= 0xE0FFF or # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
False)
# http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
#
# TODO Move me to unicodedata module and autogenerate.
#
# Unicode 14.0:
# $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
# 00AD # Cf SOFT HYPHEN
# 034F # Mn COMBINING GRAPHEME JOINER
# 061C # Cf ARABIC LETTER MARK
# 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
# 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
# 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
# 180E # Cf MONGOLIAN VOWEL SEPARATOR
# 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
# 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
# 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
# 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
# 2065 # Cn <reserved-2065>
# 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
# 3164 # Lo HANGUL FILLER
# FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
# FEFF # Cf ZERO WIDTH NO-BREAK SPACE
# FFA0 # Lo HALFWIDTH HANGUL FILLER
# FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
# 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
# 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
# E0000 # Cn <reserved-E0000>
# E0001 # Cf LANGUAGE TAG
# E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
# E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
# E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
# E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
# E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
return (
u == 0x00AD
or u == 0x034F # Cf SOFT HYPHEN
or u == 0x061C # Mn COMBINING GRAPHEME JOINER
or 0x115F <= u <= 0x1160 # Cf ARABIC LETTER MARK
or 0x17B4 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
<= u
<= 0x17B5
or 0x180B # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
<= u
<= 0x180D
or u # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
== 0x180E
or u == 0x180F # Cf MONGOLIAN VOWEL SEPARATOR
or 0x200B <= u <= 0x200F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
or 0x202A <= u <= 0x202E # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
or 0x2060 # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
<= u
<= 0x2064
or u == 0x2065 # Cf [5] WORD JOINER..INVISIBLE PLUS
or 0x2066 <= u <= 0x206F # Cn <reserved-2065>
or u == 0x3164 # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
or 0xFE00 <= u <= 0xFE0F # Lo HANGUL FILLER
or u == 0xFEFF # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
or u == 0xFFA0 # Cf ZERO WIDTH NO-BREAK SPACE
or 0xFFF0 <= u <= 0xFFF8 # Lo HALFWIDTH HANGUL FILLER
or 0x1BCA0 <= u <= 0x1BCA3 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
or 0x1D173 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
<= u
<= 0x1D17A
or u == 0xE0000 # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
or u == 0xE0001 # Cn <reserved-E0000>
or 0xE0002 <= u <= 0xE001F # Cf LANGUAGE TAG
or 0xE0020 <= u <= 0xE007F # Cn [30] <reserved-E0002>..<reserved-E001F>
or 0xE0080 <= u <= 0xE00FF # Cf [96] TAG SPACE..CANCEL TAG
or 0xE0100 <= u <= 0xE01EF # Cn [128] <reserved-E0080>..<reserved-E00FF>
or 0xE01F0 # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
<= u
<= 0xE0FFF
or False # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
)

View File

@ -14,118 +14,130 @@ log = logging.getLogger("fontTools.merge")
# General utility functions for merging values from different fonts
def equal(lst):
lst = list(lst)
t = iter(lst)
first = next(t)
assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
return first
lst = list(lst)
t = iter(lst)
first = next(t)
assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
return first
def first(lst):
return next(iter(lst))
return next(iter(lst))
def recalculate(lst):
return NotImplemented
return NotImplemented
def current_time(lst):
return timestampNow()
return timestampNow()
def bitwise_and(lst):
return reduce(operator.and_, lst)
return reduce(operator.and_, lst)
def bitwise_or(lst):
return reduce(operator.or_, lst)
return reduce(operator.or_, lst)
def avg_int(lst):
lst = list(lst)
return sum(lst) // len(lst)
lst = list(lst)
return sum(lst) // len(lst)
def onlyExisting(func):
"""Returns a filter func that when called with a list,
only calls func on the non-NotImplemented items of the list,
and only so if there's at least one item remaining.
Otherwise returns NotImplemented."""
"""Returns a filter func that when called with a list,
only calls func on the non-NotImplemented items of the list,
and only so if there's at least one item remaining.
Otherwise returns NotImplemented."""
def wrapper(lst):
items = [item for item in lst if item is not NotImplemented]
return func(items) if items else NotImplemented
def wrapper(lst):
items = [item for item in lst if item is not NotImplemented]
return func(items) if items else NotImplemented
return wrapper
return wrapper
def sumLists(lst):
l = []
for item in lst:
l.extend(item)
return l
l = []
for item in lst:
l.extend(item)
return l
def sumDicts(lst):
d = {}
for item in lst:
d.update(item)
return d
d = {}
for item in lst:
d.update(item)
return d
def mergeBits(bitmap):
def wrapper(lst):
lst = list(lst)
returnValue = 0
for bitNumber in range(bitmap["size"]):
try:
mergeLogic = bitmap[bitNumber]
except KeyError:
try:
mergeLogic = bitmap["*"]
except KeyError:
raise Exception("Don't know how to merge bit %s" % bitNumber)
shiftedBit = 1 << bitNumber
mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
returnValue |= mergedValue << bitNumber
return returnValue
def wrapper(lst):
lst = list(lst)
returnValue = 0
for bitNumber in range(bitmap['size']):
try:
mergeLogic = bitmap[bitNumber]
except KeyError:
try:
mergeLogic = bitmap['*']
except KeyError:
raise Exception("Don't know how to merge bit %s" % bitNumber)
shiftedBit = 1 << bitNumber
mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
returnValue |= mergedValue << bitNumber
return returnValue
return wrapper
return wrapper
class AttendanceRecordingIdentityDict(object):
"""A dictionary-like object that records indices of items actually accessed
from a list."""
"""A dictionary-like object that records indices of items actually accessed
from a list."""
def __init__(self, lst):
self.l = lst
self.d = {id(v):i for i,v in enumerate(lst)}
self.s = set()
def __init__(self, lst):
self.l = lst
self.d = {id(v): i for i, v in enumerate(lst)}
self.s = set()
def __getitem__(self, v):
self.s.add(self.d[id(v)])
return v
def __getitem__(self, v):
self.s.add(self.d[id(v)])
return v
class GregariousIdentityDict(object):
"""A dictionary-like object that welcomes guests without reservations and
adds them to the end of the guest list."""
"""A dictionary-like object that welcomes guests without reservations and
adds them to the end of the guest list."""
def __init__(self, lst):
self.l = lst
self.s = set(id(v) for v in lst)
def __init__(self, lst):
self.l = lst
self.s = set(id(v) for v in lst)
def __getitem__(self, v):
if id(v) not in self.s:
self.s.add(id(v))
self.l.append(v)
return v
def __getitem__(self, v):
if id(v) not in self.s:
self.s.add(id(v))
self.l.append(v)
return v
class NonhashableDict(object):
"""A dictionary-like object mapping objects to values."""
"""A dictionary-like object mapping objects to values."""
def __init__(self, keys, values=None):
if values is None:
self.d = {id(v):i for i,v in enumerate(keys)}
else:
self.d = {id(k):v for k,v in zip(keys, values)}
def __init__(self, keys, values=None):
if values is None:
self.d = {id(v): i for i, v in enumerate(keys)}
else:
self.d = {id(k): v for k, v in zip(keys, values)}
def __getitem__(self, k):
return self.d[id(k)]
def __getitem__(self, k):
return self.d[id(k)]
def __setitem__(self, k, v):
self.d[id(k)] = v
def __setitem__(self, k, v):
self.d[id(k)] = v
def __delitem__(self, k):
del self.d[id(k)]
def __delitem__(self, k):
del self.d[id(k)]

View File

@ -23,6 +23,7 @@ def calcBounds(array):
ys = [y for x, y in array]
return min(xs), min(ys), max(xs), max(ys)
def calcIntBounds(array, round=otRound):
"""Calculate the integer bounding rectangle of a 2D points array.
@ -57,6 +58,7 @@ def updateBounds(bounds, p, min=min, max=max):
xMin, yMin, xMax, yMax = bounds
return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
def pointInRect(p, rect):
"""Test if a point is inside a bounding rectangle.
@ -72,6 +74,7 @@ def pointInRect(p, rect):
xMin, yMin, xMax, yMax = rect
return (xMin <= x <= xMax) and (yMin <= y <= yMax)
def pointsInRect(array, rect):
"""Determine which points are inside a bounding rectangle.
@ -88,6 +91,7 @@ def pointsInRect(array, rect):
xMin, yMin, xMax, yMax = rect
return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
def vectorLength(vector):
"""Calculate the length of the given vector.
@ -100,6 +104,7 @@ def vectorLength(vector):
x, y = vector
return math.sqrt(x**2 + y**2)
def asInt16(array):
"""Round a list of floats to 16-bit signed integers.
@ -109,7 +114,7 @@ def asInt16(array):
Returns:
A list of rounded integers.
"""
return [int(math.floor(i+0.5)) for i in array]
return [int(math.floor(i + 0.5)) for i in array]
def normRect(rect):
@ -130,6 +135,7 @@ def normRect(rect):
(xMin, yMin, xMax, yMax) = rect
return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)
def scaleRect(rect, x, y):
"""Scale a bounding box rectangle.
@ -145,6 +151,7 @@ def scaleRect(rect, x, y):
(xMin, yMin, xMax, yMax) = rect
return xMin * x, yMin * y, xMax * x, yMax * y
def offsetRect(rect, dx, dy):
"""Offset a bounding box rectangle.
@ -158,7 +165,8 @@ def offsetRect(rect, dx, dy):
An offset bounding rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
return xMin+dx, yMin+dy, xMax+dx, yMax+dy
return xMin + dx, yMin + dy, xMax + dx, yMax + dy
def insetRect(rect, dx, dy):
"""Inset a bounding box rectangle on all sides.
@ -173,7 +181,8 @@ def insetRect(rect, dx, dy):
An inset bounding rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
return xMin+dx, yMin+dy, xMax-dx, yMax-dy
return xMin + dx, yMin + dy, xMax - dx, yMax - dy
def sectRect(rect1, rect2):
"""Test for rectangle-rectangle intersection.
@ -191,12 +200,17 @@ def sectRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2),
min(xMax1, xMax2), min(yMax1, yMax2))
xMin, yMin, xMax, yMax = (
max(xMin1, xMin2),
max(yMin1, yMin2),
min(xMax1, xMax2),
min(yMax1, yMax2),
)
if xMin >= xMax or yMin >= yMax:
return False, (0, 0, 0, 0)
return True, (xMin, yMin, xMax, yMax)
def unionRect(rect1, rect2):
"""Determine union of bounding rectangles.
@ -211,10 +225,15 @@ def unionRect(rect1, rect2):
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2),
max(xMax1, xMax2), max(yMax1, yMax2))
xMin, yMin, xMax, yMax = (
min(xMin1, xMin2),
min(yMin1, yMin2),
max(xMax1, xMax2),
max(yMax1, yMax2),
)
return (xMin, yMin, xMax, yMax)
def rectCenter(rect):
"""Determine rectangle center.
@ -226,7 +245,8 @@ def rectCenter(rect):
A 2D tuple representing the point at the center of the rectangle.
"""
(xMin, yMin, xMax, yMax) = rect
return (xMin+xMax)/2, (yMin+yMax)/2
return (xMin + xMax) / 2, (yMin + yMax) / 2
def rectArea(rect):
"""Determine rectangle area.
@ -241,6 +261,7 @@ def rectArea(rect):
(xMin, yMin, xMax, yMax) = rect
return (yMax - yMin) * (xMax - xMin)
def intRect(rect):
"""Round a rectangle to integer values.
@ -262,7 +283,6 @@ def intRect(rect):
class Vector(_Vector):
def __init__(self, *args, **kwargs):
warnings.warn(
"fontTools.misc.arrayTools.Vector has been deprecated, please use "
@ -373,7 +393,9 @@ def _test():
(0, 2, 4, 5)
"""
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)

View File

@ -104,7 +104,7 @@ def _dot(v1, v2):
def _intSecAtan(x):
# In : sympy.integrate(sp.sec(sp.atan(x)))
# Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2
return x * math.sqrt(x ** 2 + 1) / 2 + math.asinh(x) / 2
return x * math.sqrt(x**2 + 1) / 2 + math.asinh(x) / 2
def calcQuadraticArcLength(pt1, pt2, pt3):

View File

@ -4,168 +4,169 @@
class Classifier(object):
"""
Main Classifier object, used to classify things into similar sets.
"""
"""
Main Classifier object, used to classify things into similar sets.
"""
def __init__(self, sort=True):
def __init__(self, sort=True):
self._things = set() # set of all things known so far
self._sets = [] # list of class sets produced so far
self._mapping = {} # map from things to their class set
self._dirty = False
self._sort = sort
self._things = set() # set of all things known so far
self._sets = [] # list of class sets produced so far
self._mapping = {} # map from things to their class set
self._dirty = False
self._sort = sort
def add(self, set_of_things):
"""
Add a set to the classifier. Any iterable is accepted.
"""
if not set_of_things:
return
def add(self, set_of_things):
"""
Add a set to the classifier. Any iterable is accepted.
"""
if not set_of_things:
return
self._dirty = True
self._dirty = True
things, sets, mapping = self._things, self._sets, self._mapping
things, sets, mapping = self._things, self._sets, self._mapping
s = set(set_of_things)
intersection = s.intersection(things) # existing things
s.difference_update(intersection) # new things
difference = s
del s
s = set(set_of_things)
intersection = s.intersection(things) # existing things
s.difference_update(intersection) # new things
difference = s
del s
# Add new class for new things
if difference:
things.update(difference)
sets.append(difference)
for thing in difference:
mapping[thing] = difference
del difference
# Add new class for new things
if difference:
things.update(difference)
sets.append(difference)
for thing in difference:
mapping[thing] = difference
del difference
while intersection:
# Take one item and process the old class it belongs to
old_class = mapping[next(iter(intersection))]
old_class_intersection = old_class.intersection(intersection)
while intersection:
# Take one item and process the old class it belongs to
old_class = mapping[next(iter(intersection))]
old_class_intersection = old_class.intersection(intersection)
# Update old class to remove items from new set
old_class.difference_update(old_class_intersection)
# Update old class to remove items from new set
old_class.difference_update(old_class_intersection)
# Remove processed items from todo list
intersection.difference_update(old_class_intersection)
# Remove processed items from todo list
intersection.difference_update(old_class_intersection)
# Add new class for the intersection with old class
sets.append(old_class_intersection)
for thing in old_class_intersection:
mapping[thing] = old_class_intersection
del old_class_intersection
# Add new class for the intersection with old class
sets.append(old_class_intersection)
for thing in old_class_intersection:
mapping[thing] = old_class_intersection
del old_class_intersection
def update(self, list_of_sets):
"""
Add a list of sets to the classifier. Any iterable of iterables is accepted.
"""
for s in list_of_sets:
self.add(s)
def update(self, list_of_sets):
"""
Add a list of sets to the classifier. Any iterable of iterables is accepted.
"""
for s in list_of_sets:
self.add(s)
def _process(self):
if not self._dirty:
return
def _process(self):
if not self._dirty:
return
# Do any deferred processing
sets = self._sets
self._sets = [s for s in sets if s]
# Do any deferred processing
sets = self._sets
self._sets = [s for s in sets if s]
if self._sort:
self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
if self._sort:
self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
self._dirty = False
self._dirty = False
# Output methods
# Output methods
def getThings(self):
"""Returns the set of all things known so far.
def getThings(self):
"""Returns the set of all things known so far.
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._things
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._things
def getMapping(self):
"""Returns the mapping from things to their class set.
def getMapping(self):
"""Returns the mapping from things to their class set.
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._mapping
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._mapping
def getClasses(self):
"""Returns the list of class sets.
def getClasses(self):
"""Returns the list of class sets.
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._sets
The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use.
"""
self._process()
return self._sets
def classify(list_of_sets, sort=True):
"""
Takes an iterable of iterables (list of sets from here on; but any
iterable works.), and returns the smallest list of sets such that
each set, is either a subset, or is disjoint from, each of the input
sets.
"""
Takes an iterable of iterables (list of sets from here on; but any
iterable works.), and returns the smallest list of sets such that
each set, is either a subset, or is disjoint from, each of the input
sets.
In other words, this function classifies all the things present in
any of the input sets, into similar classes, based on which sets
things are a member of.
In other words, this function classifies all the things present in
any of the input sets, into similar classes, based on which sets
things are a member of.
If sort=True, return class sets are sorted by decreasing size and
their natural sort order within each class size. Otherwise, class
sets are returned in the order that they were identified, which is
generally not significant.
If sort=True, return class sets are sorted by decreasing size and
their natural sort order within each class size. Otherwise, class
sets are returned in the order that they were identified, which is
generally not significant.
>>> classify([]) == ([], {})
True
>>> classify([[]]) == ([], {})
True
>>> classify([[], []]) == ([], {})
True
>>> classify([[1]]) == ([{1}], {1: {1}})
True
>>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
True
>>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True
>>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True
>>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
True
>>> classify([[1,2],[2,4,5]]) == (
... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True
>>> classify([[1,2],[2,4,5]], sort=False) == (
... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True
>>> classify([[1,2,9],[2,4,5]], sort=False) == (
... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
... 9: {1, 9}})
True
>>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
True
>>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
>>> set([frozenset(c) for c in classes]) == set(
... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
True
>>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
True
"""
classifier = Classifier(sort=sort)
classifier.update(list_of_sets)
return classifier.getClasses(), classifier.getMapping()
>>> classify([]) == ([], {})
True
>>> classify([[]]) == ([], {})
True
>>> classify([[], []]) == ([], {})
True
>>> classify([[1]]) == ([{1}], {1: {1}})
True
>>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
True
>>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True
>>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True
>>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
True
>>> classify([[1,2],[2,4,5]]) == (
... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True
>>> classify([[1,2],[2,4,5]], sort=False) == (
... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True
>>> classify([[1,2,9],[2,4,5]], sort=False) == (
... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
... 9: {1, 9}})
True
>>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
True
>>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
>>> set([frozenset(c) for c in classes]) == set(
... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
True
>>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
True
"""
classifier = Classifier(sort=sort)
classifier.update(list_of_sets)
return classifier.getClasses(), classifier.getMapping()
if __name__ == "__main__":
import sys, doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
import sys, doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

View File

@ -6,7 +6,9 @@ import re
numberAddedRE = re.compile(r"#\d+$")
def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, suffix=""):
def makeOutputFileName(
input, outputDir=None, extension=None, overWrite=False, suffix=""
):
"""Generates a suitable file name for writing output.
Often tools will want to take a file, do some kind of transformation to it,
@ -44,6 +46,7 @@ def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, s
if not overWrite:
while os.path.exists(output):
output = os.path.join(
dirName, fileName + suffix + "#" + repr(n) + extension)
dirName, fileName + suffix + "#" + repr(n) + extension
)
n += 1
return output

View File

@ -10,9 +10,11 @@ We only define the symbols that we use. E.g. see fontTools.cu2qu
from types import SimpleNamespace
def _empty_decorator(x):
return x
compiled = False
for name in ("double", "complex", "int"):

View File

@ -1,7 +1,7 @@
"""Misc dict tools."""
__all__ = ['hashdict']
__all__ = ["hashdict"]
# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
@ -26,36 +26,54 @@ class hashdict(dict):
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join("{0}={1}".format(
str(i[0]),repr(i[1])) for i in self.__key()))
return "{0}({1})".format(
self.__class__.__name__,
", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),
)
def __hash__(self):
return hash(self.__key())
def __setitem__(self, key, value):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def __delitem__(self, key):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def clear(self):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def pop(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def popitem(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def setdefault(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
def update(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment"
.format(self.__class__.__name__))
raise TypeError(
"{0} does not support item assignment".format(self.__class__.__name__)
)
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
@ -63,4 +81,3 @@ class hashdict(dict):
result = hashdict(self)
dict.update(result, right)
return result

View File

@ -16,98 +16,104 @@ from fontTools.misc.textTools import bytechr, bytesjoin, byteord
def _decryptChar(cipher, R):
cipher = byteord(cipher)
plain = ( (cipher ^ (R>>8)) ) & 0xFF
R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
return bytechr(plain), R
cipher = byteord(cipher)
plain = ((cipher ^ (R >> 8))) & 0xFF
R = ((cipher + R) * 52845 + 22719) & 0xFFFF
return bytechr(plain), R
def _encryptChar(plain, R):
plain = byteord(plain)
cipher = ( (plain ^ (R>>8)) ) & 0xFF
R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
return bytechr(cipher), R
plain = byteord(plain)
cipher = ((plain ^ (R >> 8))) & 0xFF
R = ((cipher + R) * 52845 + 22719) & 0xFFFF
return bytechr(cipher), R
def decrypt(cipherstring, R):
r"""
Decrypts a string using the Type 1 encryption algorithm.
r"""
Decrypts a string using the Type 1 encryption algorithm.
Args:
cipherstring: String of ciphertext.
R: Initial key.
Args:
cipherstring: String of ciphertext.
R: Initial key.
Returns:
decryptedStr: Plaintext string.
R: Output key for subsequent decryptions.
Returns:
decryptedStr: Plaintext string.
R: Output key for subsequent decryptions.
Examples::
Examples::
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
"""
plainList = []
for cipher in cipherstring:
plain, R = _decryptChar(cipher, R)
plainList.append(plain)
plainstring = bytesjoin(plainList)
return plainstring, int(R)
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
"""
plainList = []
for cipher in cipherstring:
plain, R = _decryptChar(cipher, R)
plainList.append(plain)
plainstring = bytesjoin(plainList)
return plainstring, int(R)
def encrypt(plainstring, R):
r"""
Encrypts a string using the Type 1 encryption algorithm.
r"""
Encrypts a string using the Type 1 encryption algorithm.
Note that the algorithm as described in the Type 1 specification requires the
plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
number of random bytes is set to 4.) This routine does *not* add the random
prefix to its input.
Note that the algorithm as described in the Type 1 specification requires the
plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
number of random bytes is set to 4.) This routine does *not* add the random
prefix to its input.
Args:
plainstring: String of plaintext.
R: Initial key.
Args:
plainstring: String of plaintext.
R: Initial key.
Returns:
cipherstring: Ciphertext string.
R: Output key for subsequent encryptions.
Returns:
cipherstring: Ciphertext string.
R: Output key for subsequent encryptions.
Examples::
Examples::
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
>>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
>>> encryptedStr, R = encrypt(testStr, 12321)
>>> encryptedStr == b"\0\0asdadads asds\265"
True
>>> R == 36142
True
"""
cipherList = []
for plain in plainstring:
cipher, R = _encryptChar(plain, R)
cipherList.append(cipher)
cipherstring = bytesjoin(cipherList)
return cipherstring, int(R)
>>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
>>> encryptedStr, R = encrypt(testStr, 12321)
>>> encryptedStr == b"\0\0asdadads asds\265"
True
>>> R == 36142
True
"""
cipherList = []
for plain in plainstring:
cipher, R = _encryptChar(plain, R)
cipherList.append(cipher)
cipherstring = bytesjoin(cipherList)
return cipherstring, int(R)
def hexString(s):
import binascii
return binascii.hexlify(s)
import binascii
return binascii.hexlify(s)
def deHexString(h):
import binascii
h = bytesjoin(h.split())
return binascii.unhexlify(h)
import binascii
h = bytesjoin(h.split())
return binascii.unhexlify(h)
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
import sys
import doctest
sys.exit(doctest.testmod().failed)

View File

@ -5,67 +5,68 @@ import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = {
0: { # Unicode
0: 'utf_16_be',
1: 'utf_16_be',
2: 'utf_16_be',
3: 'utf_16_be',
4: 'utf_16_be',
5: 'utf_16_be',
6: 'utf_16_be',
},
1: { # Macintosh
# See
# https://github.com/fonttools/fonttools/issues/236
0: { # Macintosh, platEncID==0, keyed by langID
15: "mac_iceland",
17: "mac_turkish",
18: "mac_croatian",
24: "mac_latin2",
25: "mac_latin2",
26: "mac_latin2",
27: "mac_latin2",
28: "mac_latin2",
36: "mac_latin2",
37: "mac_romanian",
38: "mac_latin2",
39: "mac_latin2",
40: "mac_latin2",
Ellipsis: 'mac_roman', # Other
},
1: 'x_mac_japanese_ttx',
2: 'x_mac_trad_chinese_ttx',
3: 'x_mac_korean_ttx',
6: 'mac_greek',
7: 'mac_cyrillic',
25: 'x_mac_simp_chinese_ttx',
29: 'mac_latin2',
35: 'mac_turkish',
37: 'mac_iceland',
},
2: { # ISO
0: 'ascii',
1: 'utf_16_be',
2: 'latin1',
},
3: { # Microsoft
0: 'utf_16_be',
1: 'utf_16_be',
2: 'shift_jis',
3: 'gb2312',
4: 'big5',
5: 'euc_kr',
6: 'johab',
10: 'utf_16_be',
},
0: { # Unicode
0: "utf_16_be",
1: "utf_16_be",
2: "utf_16_be",
3: "utf_16_be",
4: "utf_16_be",
5: "utf_16_be",
6: "utf_16_be",
},
1: { # Macintosh
# See
# https://github.com/fonttools/fonttools/issues/236
0: { # Macintosh, platEncID==0, keyed by langID
15: "mac_iceland",
17: "mac_turkish",
18: "mac_croatian",
24: "mac_latin2",
25: "mac_latin2",
26: "mac_latin2",
27: "mac_latin2",
28: "mac_latin2",
36: "mac_latin2",
37: "mac_romanian",
38: "mac_latin2",
39: "mac_latin2",
40: "mac_latin2",
Ellipsis: "mac_roman", # Other
},
1: "x_mac_japanese_ttx",
2: "x_mac_trad_chinese_ttx",
3: "x_mac_korean_ttx",
6: "mac_greek",
7: "mac_cyrillic",
25: "x_mac_simp_chinese_ttx",
29: "mac_latin2",
35: "mac_turkish",
37: "mac_iceland",
},
2: { # ISO
0: "ascii",
1: "utf_16_be",
2: "latin1",
},
3: { # Microsoft
0: "utf_16_be",
1: "utf_16_be",
2: "shift_jis",
3: "gb2312",
4: "big5",
5: "euc_kr",
6: "johab",
10: "utf_16_be",
},
}
def getEncoding(platformID, platEncID, langID, default=None):
"""Returns the Python encoding name for OpenType platformID/encodingID/langID
triplet. If encoding for these values is not known, by default None is
returned. That can be overridden by passing a value to the default argument.
"""
encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
if isinstance(encoding, dict):
encoding = encoding.get(langID, encoding[Ellipsis])
return encoding
"""Returns the Python encoding name for OpenType platformID/encodingID/langID
triplet. If encoding for these values is not known, by default None is
returned. That can be overridden by passing a value to the default argument.
"""
encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
if isinstance(encoding, dict):
encoding = encoding.get(langID, encoding[Ellipsis])
return encoding

View File

@ -244,7 +244,8 @@ except ImportError:
except UnicodeDecodeError:
raise ValueError(
"Bytes strings can only contain ASCII characters. "
"Use unicode strings for non-ASCII characters.")
"Use unicode strings for non-ASCII characters."
)
except AttributeError:
_raise_serialization_error(s)
if s and _invalid_xml_string.search(s):
@ -425,9 +426,7 @@ except ImportError:
write(_escape_cdata(elem.tail))
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
def _escape_cdata(text):
# escape character data

View File

@ -27,216 +27,220 @@ maxFileNameLength = 255
class NameTranslationError(Exception):
pass
pass
def userNameToFileName(userName, existing=[], prefix="", suffix=""):
"""Converts from a user name to a file name.
"""Converts from a user name to a file name.
Takes care to avoid illegal characters, reserved file names, ambiguity between
upper- and lower-case characters, and clashes with existing files.
Takes care to avoid illegal characters, reserved file names, ambiguity between
upper- and lower-case characters, and clashes with existing files.
Args:
userName (str): The input file name.
existing: A case-insensitive list of all existing file names.
prefix: Prefix to be prepended to the file name.
suffix: Suffix to be appended to the file name.
Args:
userName (str): The input file name.
existing: A case-insensitive list of all existing file names.
prefix: Prefix to be prepended to the file name.
suffix: Suffix to be appended to the file name.
Returns:
A suitable filename.
Returns:
A suitable filename.
Raises:
NameTranslationError: If no suitable name could be generated.
Raises:
NameTranslationError: If no suitable name could be generated.
Examples::
Examples::
>>> userNameToFileName("a") == "a"
True
>>> userNameToFileName("A") == "A_"
True
>>> userNameToFileName("AE") == "A_E_"
True
>>> userNameToFileName("Ae") == "A_e"
True
>>> userNameToFileName("ae") == "ae"
True
>>> userNameToFileName("aE") == "aE_"
True
>>> userNameToFileName("a.alt") == "a.alt"
True
>>> userNameToFileName("A.alt") == "A_.alt"
True
>>> userNameToFileName("A.Alt") == "A_.A_lt"
True
>>> userNameToFileName("A.aLt") == "A_.aL_t"
True
>>> userNameToFileName(u"A.alT") == "A_.alT_"
True
>>> userNameToFileName("T_H") == "T__H_"
True
>>> userNameToFileName("T_h") == "T__h"
True
>>> userNameToFileName("t_h") == "t_h"
True
>>> userNameToFileName("F_F_I") == "F__F__I_"
True
>>> userNameToFileName("f_f_i") == "f_f_i"
True
>>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
True
>>> userNameToFileName(".notdef") == "_notdef"
True
>>> userNameToFileName("con") == "_con"
True
>>> userNameToFileName("CON") == "C_O_N_"
True
>>> userNameToFileName("con.alt") == "_con.alt"
True
>>> userNameToFileName("alt.con") == "alt._con"
True
"""
# the incoming name must be a str
if not isinstance(userName, str):
raise ValueError("The value for userName must be a string.")
# establish the prefix and suffix lengths
prefixLength = len(prefix)
suffixLength = len(suffix)
# replace an initial period with an _
# if no prefix is to be added
if not prefix and userName[0] == ".":
userName = "_" + userName[1:]
# filter the user name
filteredUserName = []
for character in userName:
# replace illegal characters with _
if character in illegalCharacters:
character = "_"
# add _ to all non-lower characters
elif character != character.lower():
character += "_"
filteredUserName.append(character)
userName = "".join(filteredUserName)
# clip to 255
sliceLength = maxFileNameLength - prefixLength - suffixLength
userName = userName[:sliceLength]
# test for illegal files names
parts = []
for part in userName.split("."):
if part.lower() in reservedFileNames:
part = "_" + part
parts.append(part)
userName = ".".join(parts)
# test for clash
fullName = prefix + userName + suffix
if fullName.lower() in existing:
fullName = handleClash1(userName, existing, prefix, suffix)
# finished
return fullName
>>> userNameToFileName("a") == "a"
True
>>> userNameToFileName("A") == "A_"
True
>>> userNameToFileName("AE") == "A_E_"
True
>>> userNameToFileName("Ae") == "A_e"
True
>>> userNameToFileName("ae") == "ae"
True
>>> userNameToFileName("aE") == "aE_"
True
>>> userNameToFileName("a.alt") == "a.alt"
True
>>> userNameToFileName("A.alt") == "A_.alt"
True
>>> userNameToFileName("A.Alt") == "A_.A_lt"
True
>>> userNameToFileName("A.aLt") == "A_.aL_t"
True
>>> userNameToFileName(u"A.alT") == "A_.alT_"
True
>>> userNameToFileName("T_H") == "T__H_"
True
>>> userNameToFileName("T_h") == "T__h"
True
>>> userNameToFileName("t_h") == "t_h"
True
>>> userNameToFileName("F_F_I") == "F__F__I_"
True
>>> userNameToFileName("f_f_i") == "f_f_i"
True
>>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
True
>>> userNameToFileName(".notdef") == "_notdef"
True
>>> userNameToFileName("con") == "_con"
True
>>> userNameToFileName("CON") == "C_O_N_"
True
>>> userNameToFileName("con.alt") == "_con.alt"
True
>>> userNameToFileName("alt.con") == "alt._con"
True
"""
# the incoming name must be a str
if not isinstance(userName, str):
raise ValueError("The value for userName must be a string.")
# establish the prefix and suffix lengths
prefixLength = len(prefix)
suffixLength = len(suffix)
# replace an initial period with an _
# if no prefix is to be added
if not prefix and userName[0] == ".":
userName = "_" + userName[1:]
# filter the user name
filteredUserName = []
for character in userName:
# replace illegal characters with _
if character in illegalCharacters:
character = "_"
# add _ to all non-lower characters
elif character != character.lower():
character += "_"
filteredUserName.append(character)
userName = "".join(filteredUserName)
# clip to 255
sliceLength = maxFileNameLength - prefixLength - suffixLength
userName = userName[:sliceLength]
# test for illegal files names
parts = []
for part in userName.split("."):
if part.lower() in reservedFileNames:
part = "_" + part
parts.append(part)
userName = ".".join(parts)
# test for clash
fullName = prefix + userName + suffix
if fullName.lower() in existing:
fullName = handleClash1(userName, existing, prefix, suffix)
# finished
return fullName
def handleClash1(userName, existing=[], prefix="", suffix=""):
    """Resolve a file-name clash by appending a zero-padded counter.

    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = ["a" * 5]

    >>> e = list(existing)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000002.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True
    """
    # NOTE: the mutable default for `existing` is kept for interface
    # compatibility; it is only read here, never mutated.
    # if the prefix length + user name length + suffix length + 15 is at
    # or past the maximum length, slice 15 characters off of the user name
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
        l = prefixLength + len(userName) + suffixLength + 15
        sliceLength = maxFileNameLength - l
        userName = userName[:sliceLength]
    finalName = None
    # try to add numbers to create a unique name
    counter = 1
    while finalName is None:
        name = userName + str(counter).zfill(15)
        fullName = prefix + name + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= 999999999999999:
            break
    # if there is a clash, go to the next fallback
    if finalName is None:
        finalName = handleClash2(existing, prefix, suffix)
    # finished
    return finalName
def handleClash2(existing=[], prefix="", suffix=""):
    """Resolve a file-name clash by finding the first free integer name.

    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = [prefix + str(i) + suffix for i in range(100)]

    >>> e = list(existing)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.100.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "1" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.1.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "2" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.2.0000000000')
    True
    """
    # NOTE: the mutable default for `existing` is kept for interface
    # compatibility; it is only read here, never mutated.
    # calculate the longest possible string
    maxLength = maxFileNameLength - len(prefix) - len(suffix)
    maxValue = int("9" * maxLength)
    # try to find a number
    finalName = None
    counter = 1
    while finalName is None:
        fullName = prefix + str(counter) + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= maxValue:
            break
    # raise an error if nothing has been found
    if finalName is None:
        raise NameTranslationError("No unique name could be found.")
    # finished
    return finalName
if __name__ == "__main__":
    # When executed as a script, run this module's doctests and exit
    # with the number of failures as the process status.
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)

View File

@ -23,16 +23,16 @@ import logging
# Module-level logger; used below (e.g. by ensureVersionIsLong) for warnings.
log = logging.getLogger(__name__)
# Public API of this module (each entry listed exactly once).
__all__ = [
    "MAX_F2DOT14",
    "fixedToFloat",
    "floatToFixed",
    "floatToFixedToFloat",
    "floatToFixedToStr",
    "fixedToStr",
    "strToFixed",
    "strToFixedToFloat",
    "ensureVersionIsLong",
    "versionToFixed",
]
@ -40,212 +40,214 @@ MAX_F2DOT14 = 0x7FFF / (1 << 14)
def fixedToFloat(value, precisionBits):
    """Converts a fixed-point number to a float given the number of
    precision bits.

    Args:
        value (int): Number in fixed-point format.
        precisionBits (int): Number of precision bits.

    Returns:
        Floating point value.

    Examples::

        >>> import math
        >>> f = fixedToFloat(-10139, precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    return value / (1 << precisionBits)
def floatToFixed(value, precisionBits):
    """Converts a float to a fixed-point number given the number of
    precision bits.

    Args:
        value (float): Floating point value.
        precisionBits (int): Number of precision bits.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> floatToFixed(-0.61883544921875, precisionBits=14)
        -10139
        >>> floatToFixed(-0.61884, precisionBits=14)
        -10139
    """
    # otRound (defined elsewhere in this module's imports) rounds half up.
    return otRound(value * (1 << precisionBits))
def floatToFixedToFloat(value, precisionBits):
    """Converts a float to a fixed-point number and back again.

    By converting the float to fixed, rounding it, and converting it back
    to float again, this returns a floating point values which is exactly
    representable in fixed-point format.

    Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.

    Args:
        value (float): The input floating point value.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::
        >>> import math
        >>> f1 = -0.61884
        >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
        >>> f1 != f2
        True
        >>> math.isclose(f2, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    return otRound(value * scale) / scale
def fixedToStr(value, precisionBits):
    """Converts a fixed-point number to a string representing a decimal float.

    This chooses the float that has the shortest decimal representation (the least
    number of fractional decimal digits).

    For example, to convert a fixed-point number in a 2.14 format, use
    ``precisionBits=14``::

        >>> fixedToStr(-10139, precisionBits=14)
        '-0.61884'

    This is pretty slow compared to the simple division used in ``fixedToFloat``.
    Use sporadically when you need to serialize or print the fixed-point number in
    a human-readable form.
    It uses nearestMultipleShortestRepr under the hood.

    Args:
        value (int): The fixed-point value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    scale = 1 << precisionBits
    return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale)
def strToFixed(string, precisionBits):
    """Converts a string representing a decimal float to a fixed-point number.

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> ## to convert a float string to a 2.14 fixed-point number:
        >>> strToFixed('-0.61884', precisionBits=14)
        -10139
    """
    value = float(string)
    return otRound(value * (1 << precisionBits))
def strToFixedToFloat(string, precisionBits):
    """Convert a string to a decimal float with fixed-point rounding.

    This first converts string to a float, then turns it into a fixed-point
    number with ``precisionBits`` fractional binary digits, then back to a
    float again.

    This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::

        >>> import math
        >>> s = '-0.61884'
        >>> bits = 14
        >>> f = strToFixedToFloat(s, precisionBits=bits)
        >>> math.isclose(f, -0.61883544921875)
        True
        >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
        True
    """
    value = float(string)
    scale = 1 << precisionBits
    return otRound(value * scale) / scale
def floatToFixedToStr(value, precisionBits):
    """Convert float to string with fixed-point rounding.

    This uses the shortest decimal representation (ie. the least
    number of fractional decimal digits) to represent the equivalent
    fixed-point number with ``precisionBits`` fractional binary digits.
    It uses nearestMultipleShortestRepr under the hood.

        >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
        '-0.61884'

    Args:
        value (float): The float value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    scale = 1 << precisionBits
    return nearestMultipleShortestRepr(value, factor=1.0 / scale)
def ensureVersionIsLong(value):
    """Ensure a table version is an unsigned long.

    OpenType table version numbers are expressed as a single unsigned long
    comprising of an unsigned short major version and unsigned short minor
    version. This function detects if the value to be used as a version number
    looks too small (i.e. is less than ``0x10000``), and converts it to
    fixed-point using :func:`floatToFixed` if so.

    Args:
        value (Number): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    if value < 0x10000:
        newValue = floatToFixed(value, 16)
        log.warning(
            "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
            value,
            newValue,
        )
        value = newValue
    return value
def versionToFixed(value):
    """Ensure a table version number is fixed-point.

    A string beginning with "0" (e.g. "0x10000") is parsed as an integer
    literal (with base auto-detection); anything else is parsed as a float.

    Args:
        value (str): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    value = int(value, 0) if value.startswith("0") else float(value)
    value = ensureVersionIsLong(value)
    return value

View File

@ -13,524 +13,531 @@ TIME_LEVEL = logging.DEBUG
# per-level format strings used by the default formatter
# (the level name is not printed for INFO and DEBUG messages)
# Per-level format strings used by the default formatter; the "*" key is the
# fallback for any level without its own entry (INFO/DEBUG omit the level name).
DEFAULT_FORMATS = {
    "*": "%(levelname)s: %(message)s",
    "INFO": "%(message)s",
    "DEBUG": "%(message)s",
}
class LevelFormatter(logging.Formatter):
    """Log formatter with level-specific formatting.

    Formatter class which optionally takes a dict of logging levels to
    format strings, allowing to customise the log records appearance for
    specific levels.

    Attributes:
        fmt: A dictionary mapping logging levels to format strings.
            The ``*`` key identifies the default format string.
        datefmt: As per py:class:`logging.Formatter`
        style: As per py:class:`logging.Formatter`

    >>> import sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> formatter = LevelFormatter(
    ...     fmt={
    ...         '*': '[%(levelname)s] %(message)s',
    ...         'DEBUG': '%(name)s [%(levelname)s] %(message)s',
    ...         'INFO': '%(message)s',
    ...     })
    >>> handler.setFormatter(formatter)
    >>> log = logging.getLogger('test')
    >>> log.setLevel(logging.DEBUG)
    >>> log.addHandler(handler)
    >>> log.debug('this uses a custom format string')
    test [DEBUG] this uses a custom format string
    >>> log.info('this also uses a custom format string')
    this also uses a custom format string
    >>> log.warning("this one uses the default format string")
    [WARNING] this one uses the default format string
    """

    def __init__(self, fmt=None, datefmt=None, style="%"):
        if style != "%":
            raise ValueError(
                "only '%' percent style is supported in both python 2 and 3"
            )
        if fmt is None:
            fmt = DEFAULT_FORMATS
        if isinstance(fmt, str):
            default_format = fmt
            custom_formats = {}
        elif isinstance(fmt, Mapping):
            custom_formats = dict(fmt)
            default_format = custom_formats.pop("*", None)
        else:
            raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
        super(LevelFormatter, self).__init__(default_format, datefmt)
        self.default_format = self._fmt
        self.custom_formats = {}
        for level, fmt in custom_formats.items():
            # accept both numeric levels and level names (e.g. "DEBUG")
            level = logging._checkLevel(level)
            self.custom_formats[level] = fmt

    def format(self, record):
        if self.custom_formats:
            fmt = self.custom_formats.get(record.levelno, self.default_format)
            if self._fmt != fmt:
                self._fmt = fmt
                # for python >= 3.2, _style needs to be set if _fmt changes
                if PercentStyle:
                    self._style = PercentStyle(fmt)
        return super(LevelFormatter, self).format(record)
def configLogger(**kwargs):
    """A more sophisticated logging system configuation manager.

    This is more or less the same as :py:func:`logging.basicConfig`,
    with some additional options and defaults.

    The default behaviour is to create a ``StreamHandler`` which writes to
    sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
    the handler to the top-level library logger ("fontTools").

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    Args:

        logger: Specifies the logger name or a Logger instance to be
            configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
            this function can be called multiple times to reconfigure a logger.
            If the logger or any of its children already exists before the call is
            made, they will be reset before the new configuration is applied.
        filename: Specifies that a ``FileHandler`` be created, using the
            specified filename, rather than a ``StreamHandler``.
        filemode: Specifies the mode to open the file, if filename is
            specified. (If filemode is unspecified, it defaults to ``a``).
        format: Use the specified format string for the handler. This
            argument also accepts a dictionary of format strings keyed by
            level name, to allow customising the records appearance for
            specific levels. The special ``'*'`` key is for 'any other' level.
        datefmt: Use the specified date/time format.
        level: Set the logger level to the specified level.
        stream: Use the specified stream to initialize the StreamHandler. Note
            that this argument is incompatible with ``filename`` - if both
            are present, ``stream`` is ignored.
        handlers: If specified, this should be an iterable of already created
            handlers, which will be added to the logger. Any handler in the
            list which does not have a formatter assigned will be assigned the
            formatter created in this function.
        filters: If specified, this should be an iterable of already created
            filters. If the ``handlers`` do not already have filters assigned,
            these filters will be added to them.
        propagate: All loggers have a ``propagate`` attribute which determines
            whether to continue searching for handlers up the logging hierarchy.
            If not provided, the "propagate" attribute will be set to ``False``.
    """
    # using kwargs to enforce keyword-only arguments in py2.
    handlers = kwargs.pop("handlers", None)
    if handlers is None:
        if "stream" in kwargs and "filename" in kwargs:
            raise ValueError("'stream' and 'filename' should not be specified together")
    else:
        if "stream" in kwargs or "filename" in kwargs:
            raise ValueError(
                "'stream' or 'filename' should not be specified together with 'handlers'"
            )
    if handlers is None:
        filename = kwargs.pop("filename", None)
        mode = kwargs.pop("filemode", "a")
        if filename:
            h = logging.FileHandler(filename, mode)
        else:
            stream = kwargs.pop("stream", None)
            h = logging.StreamHandler(stream)
        handlers = [h]
    # By default, the top-level library logger is configured.
    logger = kwargs.pop("logger", "fontTools")
    if not logger or isinstance(logger, str):
        # empty "" or None means the 'root' logger
        logger = logging.getLogger(logger)
    # before (re)configuring, reset named logger and its children (if exist)
    _resetExistingLoggers(parent=logger.name)
    # use DEFAULT_FORMATS if 'format' is None
    fs = kwargs.pop("format", None)
    dfs = kwargs.pop("datefmt", None)
    # XXX: '%' is the only format style supported on both py2 and 3
    style = kwargs.pop("style", "%")
    fmt = LevelFormatter(fs, dfs, style)
    filters = kwargs.pop("filters", [])
    for h in handlers:
        if h.formatter is None:
            h.setFormatter(fmt)
        if not h.filters:
            for f in filters:
                h.addFilter(f)
        logger.addHandler(h)
    if logger.name != "root":
        # stop searching up the hierarchy for handlers
        logger.propagate = kwargs.pop("propagate", False)
    # set a custom severity level
    level = kwargs.pop("level", None)
    if level is not None:
        logger.setLevel(level)
    if kwargs:
        keys = ", ".join(kwargs.keys())
        raise ValueError("Unrecognised argument(s): %s" % keys)
def _resetExistingLoggers(parent="root"):
""" Reset the logger named 'parent' and all its children to their initial
state, if they already exist in the current configuration.
"""
root = logging.root
# get sorted list of all existing loggers
existing = sorted(root.manager.loggerDict.keys())
if parent == "root":
# all the existing loggers are children of 'root'
loggers_to_reset = [parent] + existing
elif parent not in existing:
# nothing to do
return
elif parent in existing:
loggers_to_reset = [parent]
# collect children, starting with the entry after parent name
i = existing.index(parent) + 1
prefixed = parent + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
loggers_to_reset.append(existing[i])
i += 1
for name in loggers_to_reset:
if name == "root":
root.setLevel(logging.WARNING)
for h in root.handlers[:]:
root.removeHandler(h)
for f in root.filters[:]:
root.removeFilters(f)
root.disabled = False
else:
logger = root.manager.loggerDict[name]
logger.level = logging.NOTSET
logger.handlers = []
logger.filters = []
logger.propagate = True
logger.disabled = False
"""Reset the logger named 'parent' and all its children to their initial
state, if they already exist in the current configuration.
"""
root = logging.root
# get sorted list of all existing loggers
existing = sorted(root.manager.loggerDict.keys())
if parent == "root":
# all the existing loggers are children of 'root'
loggers_to_reset = [parent] + existing
elif parent not in existing:
# nothing to do
return
elif parent in existing:
loggers_to_reset = [parent]
# collect children, starting with the entry after parent name
i = existing.index(parent) + 1
prefixed = parent + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
loggers_to_reset.append(existing[i])
i += 1
for name in loggers_to_reset:
if name == "root":
root.setLevel(logging.WARNING)
for h in root.handlers[:]:
root.removeHandler(h)
for f in root.filters[:]:
root.removeFilters(f)
root.disabled = False
else:
logger = root.manager.loggerDict[name]
logger.level = logging.NOTSET
logger.handlers = []
logger.filters = []
logger.propagate = True
logger.disabled = False
class Timer(object):
""" Keeps track of overall time and split/lap times.
"""Keeps track of overall time and split/lap times.
>>> import time
>>> timer = Timer()
>>> time.sleep(0.01)
>>> print("First lap:", timer.split())
First lap: ...
>>> time.sleep(0.02)
>>> print("Second lap:", timer.split())
Second lap: ...
>>> print("Overall time:", timer.time())
Overall time: ...
>>> import time
>>> timer = Timer()
>>> time.sleep(0.01)
>>> print("First lap:", timer.split())
First lap: ...
>>> time.sleep(0.02)
>>> print("Second lap:", timer.split())
Second lap: ...
>>> print("Overall time:", timer.time())
Overall time: ...
Can be used as a context manager inside with-statements.
Can be used as a context manager inside with-statements.
>>> with Timer() as t:
... time.sleep(0.01)
>>> print("%0.3f seconds" % t.elapsed)
0... seconds
>>> with Timer() as t:
... time.sleep(0.01)
>>> print("%0.3f seconds" % t.elapsed)
0... seconds
If initialised with a logger, it can log the elapsed time automatically
upon exiting the with-statement.
If initialised with a logger, it can log the elapsed time automatically
upon exiting the with-statement.
>>> import logging
>>> log = logging.getLogger("my-fancy-timer-logger")
>>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
>>> with Timer(log, 'do something'):
... time.sleep(0.01)
Took ... to do something
>>> import logging
>>> log = logging.getLogger("my-fancy-timer-logger")
>>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
>>> with Timer(log, 'do something'):
... time.sleep(0.01)
Took ... to do something
The same Timer instance, holding a reference to a logger, can be reused
in multiple with-statements, optionally with different messages or levels.
The same Timer instance, holding a reference to a logger, can be reused
in multiple with-statements, optionally with different messages or levels.
>>> timer = Timer(log)
>>> with timer():
... time.sleep(0.01)
elapsed time: ...s
>>> with timer('redo it', level=logging.INFO):
... time.sleep(0.02)
Took ... to redo it
>>> timer = Timer(log)
>>> with timer():
... time.sleep(0.01)
elapsed time: ...s
>>> with timer('redo it', level=logging.INFO):
... time.sleep(0.02)
Took ... to redo it
It can also be used as a function decorator to log the time elapsed to run
the decorated function.
It can also be used as a function decorator to log the time elapsed to run
the decorated function.
>>> @timer()
... def test1():
... time.sleep(0.01)
>>> @timer('run test 2', level=logging.INFO)
... def test2():
... time.sleep(0.02)
>>> test1()
Took ... to run 'test1'
>>> test2()
Took ... to run test 2
"""
>>> @timer()
... def test1():
... time.sleep(0.01)
>>> @timer('run test 2', level=logging.INFO)
... def test2():
... time.sleep(0.02)
>>> test1()
Took ... to run 'test1'
>>> test2()
Took ... to run test 2
"""
# timeit.default_timer choses the most accurate clock for each platform
_time = timeit.default_timer
default_msg = "elapsed time: %(time).3fs"
default_format = "Took %(time).3fs to %(msg)s"
# timeit.default_timer choses the most accurate clock for each platform
_time = timeit.default_timer
default_msg = "elapsed time: %(time).3fs"
default_format = "Took %(time).3fs to %(msg)s"
def __init__(self, logger=None, msg=None, level=None, start=None):
self.reset(start)
if logger is None:
for arg in ('msg', 'level'):
if locals().get(arg) is not None:
raise ValueError(
"'%s' can't be specified without a 'logger'" % arg)
self.logger = logger
self.level = level if level is not None else TIME_LEVEL
self.msg = msg
def __init__(self, logger=None, msg=None, level=None, start=None):
    """Initialize the timer, optionally attaching a logger.

    Args:
        logger: optional ``logging.Logger`` used to log elapsed times.
        msg: optional message format string; requires 'logger'.
        level: optional log level; requires 'logger'. Defaults to the
            module-level TIME_LEVEL constant.
        start: optional start time (a value from the timer clock);
            defaults to the current time.

    Raises:
        ValueError: if 'msg' or 'level' is given without a 'logger'.
    """
    self.reset(start)
    if logger is None:
        # Check the arguments directly instead of poking at locals(),
        # which is fragile and defeats static analysis.
        for arg, value in (("msg", msg), ("level", level)):
            if value is not None:
                raise ValueError("'%s' can't be specified without a 'logger'" % arg)
    self.logger = logger
    self.level = level if level is not None else TIME_LEVEL
    self.msg = msg
def reset(self, start=None):
""" Reset timer to 'start_time' or the current time. """
if start is None:
self.start = self._time()
else:
self.start = start
self.last = self.start
self.elapsed = 0.0
def reset(self, start=None):
    """Restart the timer from 'start', or from the current time."""
    self.start = self._time() if start is None else start
    self.last = self.start
    self.elapsed = 0.0
def time(self):
""" Return the overall time (in seconds) since the timer started. """
return self._time() - self.start
def time(self):
"""Return the overall time (in seconds) since the timer started."""
return self._time() - self.start
def split(self):
""" Split and return the lap time (in seconds) in between splits. """
current = self._time()
self.elapsed = current - self.last
self.last = current
return self.elapsed
def split(self):
"""Split and return the lap time (in seconds) in between splits."""
current = self._time()
self.elapsed = current - self.last
self.last = current
return self.elapsed
def formatTime(self, msg, time):
""" Format 'time' value in 'msg' and return formatted string.
If 'msg' contains a '%(time)' format string, try to use that.
Otherwise, use the predefined 'default_format'.
If 'msg' is empty or None, fall back to 'default_msg'.
"""
if not msg:
msg = self.default_msg
if msg.find("%(time)") < 0:
msg = self.default_format % {"msg": msg, "time": time}
else:
try:
msg = msg % {"time": time}
except (KeyError, ValueError):
pass # skip if the format string is malformed
return msg
def formatTime(self, msg, time):
    """Interpolate 'time' into 'msg' and return the resulting string.

    A '%(time)' placeholder in 'msg' is filled in directly; otherwise
    the predefined 'default_format' wraps the message. An empty or None
    'msg' falls back to 'default_msg'. A malformed format string is
    returned unchanged.
    """
    text = msg or self.default_msg
    if "%(time)" not in text:
        return self.default_format % {"msg": text, "time": time}
    try:
        return text % {"time": time}
    except (KeyError, ValueError):
        # Malformed format string: leave the message untouched.
        return text
def __enter__(self):
""" Start a new lap """
self.last = self._time()
self.elapsed = 0.0
return self
def __enter__(self):
"""Start a new lap"""
self.last = self._time()
self.elapsed = 0.0
return self
def __exit__(self, exc_type, exc_value, traceback):
""" End the current lap. If timer has a logger, log the time elapsed,
using the format string in self.msg (or the default one).
"""
time = self.split()
if self.logger is None or exc_type:
# if there's no logger attached, or if any exception occurred in
# the with-statement, exit without logging the time
return
message = self.formatTime(self.msg, time)
# Allow log handlers to see the individual parts to facilitate things
# like a server accumulating aggregate stats.
msg_parts = { 'msg': self.msg, 'time': time }
self.logger.log(self.level, message, msg_parts)
def __exit__(self, exc_type, exc_value, traceback):
    """End the current lap. If timer has a logger, log the time elapsed,
    using the format string in self.msg (or the default one).

    Exceptions are never suppressed (implicitly returns None).
    """
    time = self.split()
    if self.logger is None or exc_type:
        # if there's no logger attached, or if any exception occurred in
        # the with-statement, exit without logging the time
        return
    message = self.formatTime(self.msg, time)
    # Allow log handlers to see the individual parts to facilitate things
    # like a server accumulating aggregate stats.
    # The mapping is passed as the log record's 'args'.
    msg_parts = {"msg": self.msg, "time": time}
    self.logger.log(self.level, message, msg_parts)
def __call__(self, func_or_msg=None, **kwargs):
""" If the first argument is a function, return a decorator which runs
the wrapped function inside Timer's context manager.
Otherwise, treat the first argument as a 'msg' string and return an updated
Timer instance, referencing the same logger.
A 'level' keyword can also be passed to override self.level.
"""
if isinstance(func_or_msg, Callable):
func = func_or_msg
# use the function name when no explicit 'msg' is provided
if not self.msg:
self.msg = "run '%s'" % func.__name__
def __call__(self, func_or_msg=None, **kwargs):
"""If the first argument is a function, return a decorator which runs
the wrapped function inside Timer's context manager.
Otherwise, treat the first argument as a 'msg' string and return an updated
Timer instance, referencing the same logger.
A 'level' keyword can also be passed to override self.level.
"""
if isinstance(func_or_msg, Callable):
func = func_or_msg
# use the function name when no explicit 'msg' is provided
if not self.msg:
self.msg = "run '%s'" % func.__name__
@wraps(func)
def wrapper(*args, **kwds):
with self:
return func(*args, **kwds)
return wrapper
else:
msg = func_or_msg or kwargs.get("msg")
level = kwargs.get("level", self.level)
return self.__class__(self.logger, msg, level)
@wraps(func)
def wrapper(*args, **kwds):
with self:
return func(*args, **kwds)
def __float__(self):
return self.elapsed
return wrapper
else:
msg = func_or_msg or kwargs.get("msg")
level = kwargs.get("level", self.level)
return self.__class__(self.logger, msg, level)
def __int__(self):
return int(self.elapsed)
def __float__(self):
return self.elapsed
def __str__(self):
return "%.3f" % self.elapsed
def __int__(self):
return int(self.elapsed)
def __str__(self):
return "%.3f" % self.elapsed
class ChannelsFilter(logging.Filter):
"""Provides a hierarchical filter for log entries based on channel names.
"""Provides a hierarchical filter for log entries based on channel names.
Filters out records emitted from a list of enabled channel names,
including their children. It works the same as the ``logging.Filter``
class, but allows the user to specify multiple channel names.
Filters out records emitted from a list of enabled channel names,
including their children. It works the same as the ``logging.Filter``
class, but allows the user to specify multiple channel names.
>>> import sys
>>> handler = logging.StreamHandler(sys.stdout)
>>> handler.setFormatter(logging.Formatter("%(message)s"))
>>> filter = ChannelsFilter("A.B", "C.D")
>>> handler.addFilter(filter)
>>> root = logging.getLogger()
>>> root.addHandler(handler)
>>> root.setLevel(level=logging.DEBUG)
>>> logging.getLogger('A.B').debug('this record passes through')
this record passes through
>>> logging.getLogger('A.B.C').debug('records from children also pass')
records from children also pass
>>> logging.getLogger('C.D').debug('this one as well')
this one as well
>>> logging.getLogger('A.B.').debug('also this one')
also this one
>>> logging.getLogger('A.F').debug('but this one does not!')
>>> logging.getLogger('C.DE').debug('neither this one!')
"""
>>> import sys
>>> handler = logging.StreamHandler(sys.stdout)
>>> handler.setFormatter(logging.Formatter("%(message)s"))
>>> filter = ChannelsFilter("A.B", "C.D")
>>> handler.addFilter(filter)
>>> root = logging.getLogger()
>>> root.addHandler(handler)
>>> root.setLevel(level=logging.DEBUG)
>>> logging.getLogger('A.B').debug('this record passes through')
this record passes through
>>> logging.getLogger('A.B.C').debug('records from children also pass')
records from children also pass
>>> logging.getLogger('C.D').debug('this one as well')
this one as well
>>> logging.getLogger('A.B.').debug('also this one')
also this one
>>> logging.getLogger('A.F').debug('but this one does not!')
>>> logging.getLogger('C.DE').debug('neither this one!')
"""
def __init__(self, *names):
self.names = names
self.num = len(names)
self.lengths = {n: len(n) for n in names}
def __init__(self, *names):
self.names = names
self.num = len(names)
self.lengths = {n: len(n) for n in names}
def filter(self, record):
if self.num == 0:
return True
for name in self.names:
nlen = self.lengths[name]
if name == record.name:
return True
elif (record.name.find(name, 0, nlen) == 0
and record.name[nlen] == "."):
return True
return False
def filter(self, record):
    """Return True if 'record' was emitted on one of the enabled
    channels or any of their children; also True when no channel
    names were configured at all.
    """
    if not self.num:
        return True
    for channel in self.names:
        if record.name == channel:
            return True
        # Child loggers: the record name starts with '<channel>.'
        nlen = self.lengths[channel]
        if record.name.find(channel, 0, nlen) == 0 and record.name[nlen] == ".":
            return True
    return False
class CapturingLogHandler(logging.Handler):
def __init__(self, logger, level):
super(CapturingLogHandler, self).__init__(level=level)
self.records = []
if isinstance(logger, str):
self.logger = logging.getLogger(logger)
else:
self.logger = logger
def __init__(self, logger, level):
    """Create a handler that records every log record it receives.

    'logger' may be a Logger instance or a logger name; 'level' is the
    minimum level this handler accepts.
    """
    super(CapturingLogHandler, self).__init__(level=level)
    self.records = []
    # Accept either a logger object or a logger name.
    self.logger = logging.getLogger(logger) if isinstance(logger, str) else logger
def __enter__(self):
self.original_disabled = self.logger.disabled
self.original_level = self.logger.level
self.original_propagate = self.logger.propagate
def __enter__(self):
self.original_disabled = self.logger.disabled
self.original_level = self.logger.level
self.original_propagate = self.logger.propagate
self.logger.addHandler(self)
self.logger.setLevel(self.level)
self.logger.disabled = False
self.logger.propagate = False
self.logger.addHandler(self)
self.logger.setLevel(self.level)
self.logger.disabled = False
self.logger.propagate = False
return self
return self
def __exit__(self, type, value, traceback):
self.logger.removeHandler(self)
self.logger.setLevel(self.original_level)
self.logger.disabled = self.original_disabled
self.logger.propagate = self.original_propagate
def __exit__(self, type, value, traceback):
self.logger.removeHandler(self)
self.logger.setLevel(self.original_level)
self.logger.disabled = self.original_disabled
self.logger.propagate = self.original_propagate
return self
return self
def emit(self, record):
self.records.append(record)
def emit(self, record):
self.records.append(record)
def assertRegex(self, regexp, msg=None):
import re
pattern = re.compile(regexp)
for r in self.records:
if pattern.search(r.getMessage()):
return True
if msg is None:
msg = "Pattern '%s' not found in logger records" % regexp
assert 0, msg
def assertRegex(self, regexp, msg=None):
    """Assert that at least one captured record's message matches
    'regexp'; return True on success, raise AssertionError otherwise.
    """
    import re

    pattern = re.compile(regexp)
    matched = any(pattern.search(record.getMessage()) for record in self.records)
    if matched:
        return True
    if msg is None:
        msg = "Pattern '%s' not found in logger records" % regexp
    assert 0, msg
class LogMixin(object):
""" Mixin class that adds logging functionality to another class.
"""Mixin class that adds logging functionality to another class.
You can define a new class that subclasses from ``LogMixin`` as well as
other base classes through multiple inheritance.
All instances of that class will have a ``log`` property that returns
a ``logging.Logger`` named after their respective ``<module>.<class>``.
You can define a new class that subclasses from ``LogMixin`` as well as
other base classes through multiple inheritance.
All instances of that class will have a ``log`` property that returns
a ``logging.Logger`` named after their respective ``<module>.<class>``.
For example:
For example:
>>> class BaseClass(object):
... pass
>>> class MyClass(LogMixin, BaseClass):
... pass
>>> a = MyClass()
>>> isinstance(a.log, logging.Logger)
True
>>> print(a.log.name)
fontTools.misc.loggingTools.MyClass
>>> class AnotherClass(MyClass):
... pass
>>> b = AnotherClass()
>>> isinstance(b.log, logging.Logger)
True
>>> print(b.log.name)
fontTools.misc.loggingTools.AnotherClass
"""
>>> class BaseClass(object):
... pass
>>> class MyClass(LogMixin, BaseClass):
... pass
>>> a = MyClass()
>>> isinstance(a.log, logging.Logger)
True
>>> print(a.log.name)
fontTools.misc.loggingTools.MyClass
>>> class AnotherClass(MyClass):
... pass
>>> b = AnotherClass()
>>> isinstance(b.log, logging.Logger)
True
>>> print(b.log.name)
fontTools.misc.loggingTools.AnotherClass
"""
@property
def log(self):
if not hasattr(self, "_log"):
name = ".".join(
(self.__class__.__module__, self.__class__.__name__)
)
self._log = logging.getLogger(name)
return self._log
@property
def log(self):
    """A ``logging.Logger`` named '<module>.<class>', created lazily on
    first access and cached on the instance.
    """
    try:
        return self._log
    except AttributeError:
        cls = self.__class__
        self._log = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
        return self._log
def deprecateArgument(name, msg, category=UserWarning):
    """Raise a warning about deprecated function argument 'name'.

    'msg' explains what to use instead; 'category' is the warning class.
    The stacklevel points at the caller of the deprecated function.
    """
    message = "%r is deprecated; %s" % (name, msg)
    warnings.warn(message, category=category, stacklevel=3)
"""Raise a warning about deprecated function argument 'name'."""
warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
def deprecateFunction(msg, category=UserWarning):
    """Decorator that emits a deprecation warning each time the wrapped
    function is called. 'msg' explains what to use instead.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warning = "%r is deprecated; %s" % (func.__name__, msg)
            warnings.warn(warning, category=category, stacklevel=2)
            return func(*args, **kwargs)

        return wrapper

    return decorator
"""Decorator to raise a warning when a deprecated function is called."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
"%r is deprecated; %s" % (func.__name__, msg),
category=category,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
return decorator
if __name__ == "__main__":
import doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
import doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

View File

@ -1,54 +1,56 @@
from fontTools.misc.textTools import Tag, bytesjoin, strjoin
try:
import xattr
import xattr
except ImportError:
xattr = None
xattr = None
def _reverseString(s):
s = list(s)
s.reverse()
return strjoin(s)
s = list(s)
s.reverse()
return strjoin(s)
def getMacCreatorAndType(path):
    """Returns file creator and file type codes for a path.

    Args:
        path (str): A file path.

    Returns:
        A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
        representing the file creator and the second representing the
        file type. Both are None when the attribute is unavailable or the
        ``xattr`` module is not installed.
    """
    if xattr is not None:
        try:
            # The Finder info extended attribute stores the type code in
            # bytes 0-3 and the creator code in bytes 4-7.
            finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo')
        except (KeyError, IOError):
            # Attribute missing or unreadable: fall through to (None, None).
            pass
        else:
            fileType = Tag(finderInfo[:4])
            fileCreator = Tag(finderInfo[4:8])
            return fileCreator, fileType
    return None, None
Returns:
A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
representing the file creator and the second representing the
file type.
"""
if xattr is not None:
try:
finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
except (KeyError, IOError):
pass
else:
fileType = Tag(finderInfo[:4])
fileCreator = Tag(finderInfo[4:8])
return fileCreator, fileType
return None, None
def setMacCreatorAndType(path, fileCreator, fileType):
"""Set file creator and file type codes for a path.
"""Set file creator and file type codes for a path.
Note that if the ``xattr`` module is not installed, no action is
taken but no error is raised.
Note that if the ``xattr`` module is not installed, no action is
taken but no error is raised.
Args:
path (str): A file path.
fileCreator: A four-character file creator tag.
fileType: A four-character file type tag.
Args:
path (str): A file path.
fileCreator: A four-character file creator tag.
fileType: A four-character file type tag.
"""
if xattr is not None:
from fontTools.misc.textTools import pad
if not all(len(s) == 4 for s in (fileCreator, fileType)):
raise TypeError('arg must be string of 4 chars')
finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo)
"""
if xattr is not None:
from fontTools.misc.textTools import pad
if not all(len(s) == 4 for s in (fileCreator, fileType)):
raise TypeError("arg must be string of 4 chars")
finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)

View File

@ -7,216 +7,218 @@ from collections.abc import MutableMapping
class ResourceError(Exception):
pass
pass
class ResourceReader(MutableMapping):
"""Reader for Mac OS resource forks.
"""Reader for Mac OS resource forks.
Parses a resource fork and returns resources according to their type.
If run on OS X, this will open the resource fork in the filesystem.
Otherwise, it will open the file itself and attempt to read it as
though it were a resource fork.
Parses a resource fork and returns resources according to their type.
If run on OS X, this will open the resource fork in the filesystem.
Otherwise, it will open the file itself and attempt to read it as
though it were a resource fork.
The returned object can be indexed by type and iterated over,
returning in each case a list of py:class:`Resource` objects
representing all the resources of a certain type.
The returned object can be indexed by type and iterated over,
returning in each case a list of py:class:`Resource` objects
representing all the resources of a certain type.
"""
def __init__(self, fileOrPath):
"""Open a file
"""
Args:
fileOrPath: Either an object supporting a ``read`` method, an
``os.PathLike`` object, or a string.
"""
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
def __init__(self, fileOrPath):
"""Open a file
@staticmethod
def openResourceFork(path):
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
Args:
fileOrPath: Either an object supporting a ``read`` method, an
``os.PathLike`` object, or a string.
"""
self._resources = OrderedDict()
if hasattr(fileOrPath, "read"):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openResourceFork(path):
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
with open(path + "/..namedfork/rsrc", "rb") as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
@staticmethod
def openDataFork(path):
with open(path, "rb") as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _read(self, numBytes, offset=None):
    """Read exactly 'numBytes' from the fork, optionally seeking to
    'offset' first; raise ResourceError on a bad seek or short read.
    """
    if offset is not None:
        try:
            self.file.seek(offset)
        except OverflowError:
            # Offset doesn't fit the platform's file-position type.
            raise ResourceError("Failed to seek offset ('offset' is too large)")
        if self.file.tell() != offset:
            raise ResourceError("Failed to seek offset (reached EOF)")
    try:
        data = self.file.read(numBytes)
    except OverflowError:
        raise ResourceError("Cannot read resource ('numBytes' is too large)")
    if len(data) != numBytes:
        # Short read: the fork is truncated or the stored lengths are corrupt.
        raise ResourceError("Cannot read resource (not enough data)")
    return data
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def _readTypeList(self):
    """Parse the resource type list and load each type's reference list.

    Populates self._resources, mapping each resource type string to the
    list of Resource objects of that type.
    """
    absTypeListOffset = self.absTypeListOffset
    numTypesData = self._read(2, absTypeListOffset)
    # The stored count is one less than the actual number of types.
    (self.numTypes,) = struct.unpack(">H", numTypesData)
    absTypeListOffset2 = absTypeListOffset + 2
    for i in range(self.numTypes + 1):
        resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
        resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
        item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
        # Resource types are classic Mac OS (mac-roman) strings.
        resType = tostr(item["type"], encoding="mac-roman")
        refListOffset = absTypeListOffset + item["refListOffset"]
        # 'numRes' is likewise stored off by one.
        numRes = item["numRes"] + 1
        resources = self._readReferenceList(resType, refListOffset, numRes)
        self._resources[resType] = resources
def __getitem__(self, resType):
return self._resources[resType]
def _readReferenceList(self, resType, refListOffset, numRes):
    """Decompile 'numRes' resource reference records of type 'resType'
    starting at 'refListOffset'; return them as a list of Resource objects.
    """
    result = []
    for index in range(numRes):
        itemOffset = refListOffset + ResourceRefItemSize * index
        resource = Resource(resType)
        resource.decompile(self._read(ResourceRefItemSize, itemOffset), self)
        result.append(resource)
    return result
def __delitem__(self, resType):
del self._resources[resType]
def __getitem__(self, resType):
return self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __delitem__(self, resType):
del self._resources[resType]
def __len__(self):
return len(self._resources)
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __iter__(self):
return iter(self._resources)
def __len__(self):
return len(self._resources)
def keys(self):
return self._resources.keys()
def __iter__(self):
return iter(self._resources)
@property
def types(self):
"""A list of the types of resources in the resource fork."""
return list(self._resources.keys())
def keys(self):
return self._resources.keys()
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
@property
def types(self):
"""A list of the types of resources in the resource fork."""
return list(self._resources.keys())
def getIndices(self, resType):
"""Returns a list of indices of resources of a given type."""
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndices(self, resType):
    """Returns a list of 1-based indices of resources of a given type."""
    numRes = self.countResources(resType)
    return list(range(1, numRes + 1)) if numRes else []
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def getIndResource(self, resType, index):
    """Return resource of given type located at an index ranging from 1
    to the number of resources for that type, or None if not found.
    """
    if index < 1:
        # Resource indices are 1-based; reject zero and negatives.
        return None
    try:
        return self[resType][index - 1]
    except (KeyError, IndexError):
        return None
def close(self):
if not self.file.closed:
self.file.close()
def getNamedResource(self, resType, name):
    """Return the named resource of given type, else return None."""
    # Resource names are mac-roman; normalize the query the same way.
    wanted = tostr(name, encoding="mac-roman")
    for resource in self.get(resType, []):
        if resource.name == wanted:
            return resource
    return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
"""Represents a resource stored within a resource fork.
"""Represents a resource stored within a resource fork.
Attributes:
type: resource type.
data: resource data.
id: ID.
name: resource name.
attr: attributes.
"""
Attributes:
type: resource type.
data: resource data.
id: ID.
name: resource name.
attr: attributes.
"""
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def __init__(
    self, resType=None, resData=None, resID=None, resName=None, resAttr=None
):
    """Create a resource, optionally pre-filled with type, data, ID,
    name and attribute values.
    """
    self.type = resType  # four-character resource type
    self.data = resData  # raw resource data bytes
    self.id = resID  # numeric resource ID
    self.name = resName  # optional resource name
    self.attr = resAttr  # resource attribute flags
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
def decompile(self, refData, reader):
    """Parse a resource reference record and load this resource's data
    (and name, when one is present) via 'reader'.
    """
    sstruct.unpack(ResourceRefItem, refData, self)
    # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
    (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
    absDataOffset = reader.dataOffset + self.dataOffset
    # The resource data is preceded by a 4-byte big-endian length field.
    (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
    self.data = reader._read(dataLength)
    if self.nameOffset == -1:
        # No name record for this resource.
        return
    absNameOffset = reader.absNameListOffset + self.nameOffset
    # Names are Pascal strings: a length byte followed by the characters.
    (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
    (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
    self.name = tostr(name, encoding="mac-roman")
ResourceForkHeader = """

View File

@ -353,7 +353,9 @@ def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
return el
def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
def _dict_element(
d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("dict")
items = d.items()
if ctx.sort_keys:
@ -371,7 +373,9 @@ def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etre
return el
def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
def _array_element(
array: Sequence[PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("array")
if len(array) == 0:
return el

File diff suppressed because it is too large Load Diff

View File

@ -1,20 +1,20 @@
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec
from .psOperators import (
PSOperators,
ps_StandardEncoding,
ps_array,
ps_boolean,
ps_dict,
ps_integer,
ps_literal,
ps_mark,
ps_name,
ps_operator,
ps_procedure,
ps_procmark,
ps_real,
ps_string,
PSOperators,
ps_StandardEncoding,
ps_array,
ps_boolean,
ps_dict,
ps_integer,
ps_literal,
ps_mark,
ps_name,
ps_operator,
ps_procedure,
ps_procmark,
ps_real,
ps_string,
)
import re
from collections.abc import Callable
@ -24,7 +24,7 @@ import logging
log = logging.getLogger(__name__)
ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently
ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
@ -32,7 +32,7 @@ endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = br"""
stringPat = rb"""
\(
(
(
@ -51,335 +51,349 @@ stringRE = re.compile(stringPat)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception): pass
class PSError(Exception): pass
class PSTokenError(Exception):
pass
class PSError(Exception):
pass
class PSTokenizer(object):
def __init__(self, buf=b"", encoding="ascii"):
    """File-like tokenizer over an in-memory PostScript byte buffer.

    Args:
        buf: the PostScript source; anything accepted by tobytes().
        encoding: encoding used to turn byte tokens into str tokens.
    """
    # Force self.buf to be a byte string
    buf = tobytes(buf)
    self.buf = buf
    self.len = len(buf)
    self.pos = 0  # current read position within self.buf
    self.closed = False
    self.encoding = encoding
def __init__(self, buf=b'', encoding="ascii"):
# Force self.buf to be a byte string
buf = tobytes(buf)
self.buf = buf
self.len = len(buf)
self.pos = 0
self.closed = False
self.encoding = encoding
def read(self, n=-1):
    """Read at most 'n' bytes from the buffer, or less if the read
    hits EOF before obtaining 'n' bytes.
    If 'n' is negative or omitted, read all data until EOF is reached.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    if n is None or n < 0:
        newpos = self.len
    else:
        # clamp so we never read past the end of the buffer
        newpos = min(self.pos + n, self.len)
    r = self.buf[self.pos : newpos]
    self.pos = newpos
    return r
def read(self, n=-1):
"""Read at most 'n' bytes from the buffer, or less if the read
hits EOF before obtaining 'n' bytes.
If 'n' is negative or omitted, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if n is None or n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def close(self):
    # Idempotent: mark closed and drop the buffer/position so that a
    # subsequent read() raises ValueError.
    if not self.closed:
        self.closed = True
        del self.buf, self.pos
def close(self):
if not self.closed:
self.closed = True
del self.buf, self.pos
def getnexttoken(
self,
# localize some stuff, for performance
len=len,
ps_special=ps_special,
stringmatch=stringRE.match,
hexstringmatch=hexstringRE.match,
commentmatch=commentRE.match,
endmatch=endofthingRE.match,
):
def getnexttoken(self,
# localize some stuff, for performance
len=len,
ps_special=ps_special,
stringmatch=stringRE.match,
hexstringmatch=hexstringRE.match,
commentmatch=commentRE.match,
endmatch=endofthingRE.match):
self.skipwhite()
if self.pos >= self.len:
return None, None
pos = self.pos
buf = self.buf
char = bytechr(byteord(buf[pos]))
if char in ps_special:
if char in b"{}[]":
tokentype = "do_special"
token = char
elif char == b"%":
tokentype = "do_comment"
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == b"(":
tokentype = "do_string"
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError("bad string at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == b"<":
tokentype = "do_hexstring"
m = hexstringmatch(buf, pos)
if m is None:
raise PSTokenError("bad hexstring at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
else:
raise PSTokenError("bad token at character %d" % pos)
else:
if char == b"/":
tokentype = "do_literal"
m = endmatch(buf, pos + 1)
else:
tokentype = ""
m = endmatch(buf, pos)
if m is None:
raise PSTokenError("bad token at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
token = tostr(token, encoding=self.encoding)
return tokentype, token
self.skipwhite()
if self.pos >= self.len:
return None, None
pos = self.pos
buf = self.buf
char = bytechr(byteord(buf[pos]))
if char in ps_special:
if char in b'{}[]':
tokentype = 'do_special'
token = char
elif char == b'%':
tokentype = 'do_comment'
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == b'(':
tokentype = 'do_string'
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError('bad string at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == b'<':
tokentype = 'do_hexstring'
m = hexstringmatch(buf, pos)
if m is None:
raise PSTokenError('bad hexstring at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
else:
raise PSTokenError('bad token at character %d' % pos)
else:
if char == b'/':
tokentype = 'do_literal'
m = endmatch(buf, pos+1)
else:
tokentype = ''
m = endmatch(buf, pos)
if m is None:
raise PSTokenError('bad token at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
token = tostr(token, encoding=self.encoding)
return tokentype, token
def skipwhite(self, whitematch=skipwhiteRE.match):
    # Advance self.pos past any run of whitespace; the '*'-quantified
    # pattern always matches, possibly an empty span.
    _, nextpos = whitematch(self.buf, self.pos).span()
    self.pos = nextpos
def skipwhite(self, whitematch=skipwhiteRE.match):
_, nextpos = whitematch(self.buf, self.pos).span()
self.pos = nextpos
def starteexec(self):
    """Switch the tokenizer to the eexec-decrypted portion of the buffer."""
    self.pos = self.pos + 1
    # keep the raw (encrypted) tail around so stopeexec() can restore it
    self.dirtybuf = self.buf[self.pos :]
    # 55665 is the decryption key passed to eexec.decrypt
    self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
    self.len = len(self.buf)
    self.pos = 4  # skip the first four decrypted bytes
def starteexec(self):
self.pos = self.pos + 1
self.dirtybuf = self.buf[self.pos:]
self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
self.len = len(self.buf)
self.pos = 4
def stopeexec(self):
    """Restore the un-decrypted buffer saved by starteexec()."""
    if not hasattr(self, "dirtybuf"):
        # starteexec() was never called; nothing to restore
        return
    self.buf = self.dirtybuf
    del self.dirtybuf
def stopeexec(self):
if not hasattr(self, "dirtybuf"):
return
self.buf = self.dirtybuf
del self.dirtybuf
class PSInterpreter(PSOperators):
def __init__(self, encoding="ascii"):
    """Minimal PostScript interpreter built on the PSOperators mixin.

    Args:
        encoding: encoding used by the tokenizer for str tokens.
    """
    systemdict = {}
    userdict = {}
    self.encoding = encoding
    # dictionary stack: systemdict at the bottom, userdict on top
    self.dictstack = [systemdict, userdict]
    self.stack = []  # operand stack
    self.proclevel = 0  # nesting depth of { } procedure definitions
    self.procmark = ps_procmark()
    self.fillsystemdict()
def __init__(self, encoding="ascii"):
systemdict = {}
userdict = {}
self.encoding = encoding
self.dictstack = [systemdict, userdict]
self.stack = []
self.proclevel = 0
self.procmark = ps_procmark()
self.fillsystemdict()
def fillsystemdict(self):
    """Populate systemdict with the built-in operators and constants."""
    systemdict = self.dictstack[0]
    # '[' and 'mark' are the same mark object; ']' collects back to it
    systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
    systemdict["]"] = ps_operator("]", self.do_makearray)
    systemdict["true"] = ps_boolean(1)
    systemdict["false"] = ps_boolean(0)
    systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
    systemdict["FontDirectory"] = ps_dict({})
    # register every ps_* method of this class (and bases) as an operator
    self.suckoperators(systemdict, self.__class__)
def fillsystemdict(self):
systemdict = self.dictstack[0]
systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
systemdict[']'] = ps_operator(']', self.do_makearray)
systemdict['true'] = ps_boolean(1)
systemdict['false'] = ps_boolean(0)
systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
systemdict['FontDirectory'] = ps_dict({})
self.suckoperators(systemdict, self.__class__)
def suckoperators(self, systemdict, klass):
    """Register every callable ``ps_<name>`` attribute of ``klass`` — and,
    recursively, of its base classes — as operator ``<name>``."""
    for name in dir(klass):
        attr = getattr(self, name)
        if isinstance(attr, Callable) and name[:3] == "ps_":
            # strip the 'ps_' prefix to get the operator name
            name = name[3:]
            systemdict[name] = ps_operator(name, attr)
    for baseclass in klass.__bases__:
        self.suckoperators(systemdict, baseclass)
def suckoperators(self, systemdict, klass):
for name in dir(klass):
attr = getattr(self, name)
if isinstance(attr, Callable) and name[:3] == 'ps_':
name = name[3:]
systemdict[name] = ps_operator(name, attr)
for baseclass in klass.__bases__:
self.suckoperators(systemdict, baseclass)
def interpret(self, data, getattr=getattr):
    """Tokenize and execute the PostScript program in ``data``.

    On any error, logs the buffer context around the failure position
    and re-raises.
    """
    tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
    # localize hot attributes for the token loop
    getnexttoken = tokenizer.getnexttoken
    do_token = self.do_token
    handle_object = self.handle_object
    try:
        while 1:
            tokentype, token = getnexttoken()
            if not token:
                break
            if tokentype:
                # special token kinds dispatch to the matching do_* handler
                handler = getattr(self, tokentype)
                object = handler(token)
            else:
                object = do_token(token)
            if object is not None:
                handle_object(object)
        tokenizer.close()
        self.tokenizer = None
    except:
        # NOTE(review): bare except (catches BaseException) so *any* failure
        # logs the surrounding buffer before re-raising; consider narrowing.
        if self.tokenizer is not None:
            log.debug(
                "ps error:\n"
                "- - - - - - -\n"
                "%s\n"
                ">>>\n"
                "%s\n"
                "- - - - - - -",
                self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
                self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
            )
        raise
def interpret(self, data, getattr=getattr):
tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
getnexttoken = tokenizer.getnexttoken
do_token = self.do_token
handle_object = self.handle_object
try:
while 1:
tokentype, token = getnexttoken()
if not token:
break
if tokentype:
handler = getattr(self, tokentype)
object = handler(token)
else:
object = do_token(token)
if object is not None:
handle_object(object)
tokenizer.close()
self.tokenizer = None
except:
if self.tokenizer is not None:
log.debug(
'ps error:\n'
'- - - - - - -\n'
'%s\n'
'>>>\n'
'%s\n'
'- - - - - - -',
self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos],
self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
raise
def handle_object(self, object):
    """Execute or push one parsed object, honoring procedure nesting."""
    if not (self.proclevel or object.literal or object.type == "proceduretype"):
        # executable context: resolve names, then run operators/procedures
        if object.type != "operatortype":
            object = self.resolve_name(object.value)
        if object.literal:
            self.push(object)
        else:
            if object.type == "proceduretype":
                self.call_procedure(object)
            else:
                object.function()
    else:
        # inside a { } body, or a literal/procedure object: just push it
        self.push(object)
def handle_object(self, object):
if not (self.proclevel or object.literal or object.type == 'proceduretype'):
if object.type != 'operatortype':
object = self.resolve_name(object.value)
if object.literal:
self.push(object)
else:
if object.type == 'proceduretype':
self.call_procedure(object)
else:
object.function()
else:
self.push(object)
def call_procedure(self, proc):
handle_object = self.handle_object
for item in proc.value:
handle_object(item)
def call_procedure(self, proc):
handle_object = self.handle_object
for item in proc.value:
handle_object(item)
def resolve_name(self, name):
    """Look up ``name`` in the dictionary stack, innermost scope first."""
    # Walk from the most recently pushed dict down to systemdict.
    for scope in reversed(self.dictstack):
        if name in scope:
            return scope[name]
    raise PSError("name error: " + str(name))
def resolve_name(self, name):
dictstack = self.dictstack
for i in range(len(dictstack)-1, -1, -1):
if name in dictstack[i]:
return dictstack[i][name]
raise PSError('name error: ' + str(name))
def do_token(
    self,
    token,
    # localized builtins/constructors, for performance
    int=int,
    float=float,
    ps_name=ps_name,
    ps_integer=ps_integer,
    ps_real=ps_real,
):
    """Convert a plain token into a ps_integer, ps_real or ps_name object."""
    try:
        num = int(token)
    except (ValueError, OverflowError):
        try:
            num = float(token)
        except (ValueError, OverflowError):
            if "#" in token:
                # radix notation: <base>#<digits>
                hashpos = token.find("#")
                try:
                    base = int(token[:hashpos])
                    num = int(token[hashpos + 1 :], base)
                except (ValueError, OverflowError):
                    return ps_name(token)
                else:
                    return ps_integer(num)
            else:
                return ps_name(token)
        else:
            return ps_real(num)
    else:
        return ps_integer(num)
def do_token(self, token,
int=int,
float=float,
ps_name=ps_name,
ps_integer=ps_integer,
ps_real=ps_real):
try:
num = int(token)
except (ValueError, OverflowError):
try:
num = float(token)
except (ValueError, OverflowError):
if '#' in token:
hashpos = token.find('#')
try:
base = int(token[:hashpos])
num = int(token[hashpos+1:], base)
except (ValueError, OverflowError):
return ps_name(token)
else:
return ps_integer(num)
else:
return ps_name(token)
else:
return ps_real(num)
else:
return ps_integer(num)
def do_comment(self, token):
pass
def do_comment(self, token):
pass
def do_literal(self, token):
return ps_literal(token[1:])
def do_literal(self, token):
return ps_literal(token[1:])
def do_string(self, token):
return ps_string(token[1:-1])
def do_string(self, token):
return ps_string(token[1:-1])
def do_hexstring(self, token):
    """Decode a hex string token ``<...>`` into a ps_string."""
    # Strip the angle brackets and any embedded whitespace.
    digits = "".join(token[1:-1].split())
    if len(digits) % 2:
        # odd number of digits: pad the final byte with a trailing zero
        digits += "0"
    decoded = "".join(
        chr(int(digits[i : i + 2], 16)) for i in range(0, len(digits), 2)
    )
    return ps_string(decoded)
def do_hexstring(self, token):
hexStr = "".join(token[1:-1].split())
if len(hexStr) % 2:
hexStr = hexStr + '0'
cleanstr = []
for i in range(0, len(hexStr), 2):
cleanstr.append(chr(int(hexStr[i:i+2], 16)))
cleanstr = "".join(cleanstr)
return ps_string(cleanstr)
def do_special(self, token):
    """Handle the special tokens ``{``, ``}``, ``[`` and ``]``."""
    if token == "{":
        # entering a procedure body: bump nesting, push the procedure mark
        self.proclevel += 1
        return self.procmark
    if token == "}":
        # pop everything back to the matching procedure mark
        body = []
        while True:
            top = self.pop()
            if top == self.procmark:
                break
            body.append(top)
        self.proclevel -= 1
        body.reverse()
        return ps_procedure(body)
    if token == "[":
        return self.mark
    if token == "]":
        return ps_name("]")
    raise PSTokenError("huh?")
def do_special(self, token):
if token == '{':
self.proclevel = self.proclevel + 1
return self.procmark
elif token == '}':
proc = []
while 1:
topobject = self.pop()
if topobject == self.procmark:
break
proc.append(topobject)
self.proclevel = self.proclevel - 1
proc.reverse()
return ps_procedure(proc)
elif token == '[':
return self.mark
elif token == ']':
return ps_name(']')
else:
raise PSTokenError('huh?')
def push(self, object):
self.stack.append(object)
def push(self, object):
self.stack.append(object)
def pop(self, *types):
    """Pop and return the top object; if ``types`` is given, the object's
    type name must be one of them."""
    if not self.stack:
        raise PSError("stack underflow")
    top = self.stack[-1]
    # optional type check before the stack is modified
    if types and top.type not in types:
        raise PSError(
            "typecheck, expected %s, found %s" % (repr(types), top.type)
        )
    return self.stack.pop()
def pop(self, *types):
stack = self.stack
if not stack:
raise PSError('stack underflow')
object = stack[-1]
if types:
if object.type not in types:
raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
del stack[-1]
return object
def do_makearray(self):
    """Collect stack items down to the mark into a ps_array (the ``]`` operator)."""
    items = []
    while True:
        top = self.pop()
        if top == self.mark:
            break
        items.append(top)
    # items were popped in reverse order
    items.reverse()
    self.push(ps_array(items))
def do_makearray(self):
array = []
while 1:
topobject = self.pop()
if topobject == self.mark:
break
array.append(topobject)
array.reverse()
self.push(ps_array(array))
def close(self):
"""Remove circular references."""
del self.stack
del self.dictstack
def close(self):
"""Remove circular references."""
del self.stack
del self.dictstack
def unpack_item(item):
    """Recursively convert a PostScript object wrapper to plain Python values.

    Args:
        item: a ps_object-like wrapper with a ``.value`` attribute (a dict of
            wrappers, a list of wrappers, or a plain value) and a ``.type``
            attribute (a type-name string such as ``'proceduretype'``).

    Returns:
        A plain dict, tuple, list or scalar with all nested wrappers unpacked;
        lists whose item.type is ``'proceduretype'`` become tuples.
    """
    value = item.value
    # isinstance() instead of ``type(...) == dict/list`` (anti-idiom): behavior
    # for plain dict/list is unchanged, and subclasses are now unpacked too.
    if isinstance(value, dict):
        newitem = {key: unpack_item(subitem) for key, subitem in value.items()}
    elif isinstance(value, list):
        newitem = [unpack_item(subitem) for subitem in value]
        if item.type == "proceduretype":
            # procedures are represented as immutable tuples
            newitem = tuple(newitem)
    else:
        newitem = value
    return newitem
tp = type(item.value)
if tp == dict:
newitem = {}
for key, value in item.value.items():
newitem[key] = unpack_item(value)
elif tp == list:
newitem = [None] * len(item.value)
for i in range(len(item.value)):
newitem[i] = unpack_item(item.value[i])
if item.type == "proceduretype":
newitem = tuple(newitem)
else:
newitem = item.value
return newitem
def suckfont(data, encoding="ascii"):
    """Interpret Type 1 font program ``data`` (bytes) and return the font
    dictionary as a plain Python structure (see unpack_item)."""
    match = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
    fontName = match.group(1).decode() if match else None
    interpreter = PSInterpreter(encoding=encoding)
    # Define a dummy font first so FontDirectory is never empty.
    interpreter.interpret(
        b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
    )
    interpreter.interpret(data)
    fontdir = interpreter.dictstack[0]["FontDirectory"].value
    if fontName in fontdir:
        rawfont = fontdir[fontName]
    else:
        # FontName missing or not found: take the first defined font,
        # skipping the dummy Helvetica entry when anything else exists.
        fontNames = sorted(fontdir)
        if len(fontNames) > 1:
            fontNames.remove("Helvetica")
        rawfont = fontdir[fontNames[0]]
    interpreter.close()
    return unpack_item(rawfont)
m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
if m:
fontName = m.group(1)
fontName = fontName.decode()
else:
fontName = None
interpreter = PSInterpreter(encoding=encoding)
interpreter.interpret(
b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
)
interpreter.interpret(data)
fontdir = interpreter.dictstack[0]["FontDirectory"].value
if fontName in fontdir:
rawfont = fontdir[fontName]
else:
# fall back, in case fontName wasn't found
fontNames = list(fontdir.keys())
if len(fontNames) > 1:
fontNames.remove("Helvetica")
fontNames.sort()
rawfont = fontdir[fontNames[0]]
interpreter.close()
return unpack_item(rawfont)

File diff suppressed because it is too large Load Diff

View File

@ -9,41 +9,45 @@ import logging
log = logging.getLogger(__name__)
__all__ = [
"noRound",
"otRound",
"maybeRound",
"roundFunc",
"noRound",
"otRound",
"maybeRound",
"roundFunc",
]
def noRound(value):
return value
return value
def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``.
"""Round float value to nearest integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
Args:
value (float): The input floating-point value.
Args:
value (float): The input floating-point value.
Returns
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
Returns
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
def maybeRound(v, tolerance, round=otRound):
    """Round ``v`` with ``round``, but only keep the rounded value if it
    differs from ``v`` by at most ``tolerance``."""
    candidate = round(v)
    if abs(candidate - v) <= tolerance:
        return candidate
    return v
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
@ -52,7 +56,7 @@ def roundFunc(tolerance, round=otRound):
if tolerance == 0:
return noRound
if tolerance >= .5:
if tolerance >= 0.5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
@ -85,7 +89,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
return "0.0"
value = otRound(value / factor) * factor
eps = .5 * factor
eps = 0.5 * factor
lo = value - eps
hi = value + eps
# If the range of valid choices spans an integer, return the integer.
@ -99,7 +103,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
for i in range(len(lo)):
if lo[i] != hi[i]:
break
period = lo.find('.')
period = lo.find(".")
assert period < i
fmt = "%%.%df" % (i - period)
return fmt % value

View File

@ -56,68 +56,72 @@ __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception):
pass
pass
def pack(fmt, obj):
    """Pack the named fields of ``obj`` (a dict, or any object with matching
    attributes) into a byte string according to the sstruct format ``fmt``."""
    formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
    source = obj if isinstance(obj, dict) else obj.__dict__

    def convert(name):
        # Convert one field value to its wire representation.
        value = source[name]
        if name in fixes:
            # fixed point conversion
            return fl2fi(value, fixes[name])
        if isinstance(value, str):
            return tobytes(value)
        return value

    return struct.pack(formatstring, *[convert(name) for name in names])
formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
elements = []
if not isinstance(obj, dict):
obj = obj.__dict__
for name in names:
value = obj[name]
if name in fixes:
# fixed point conversion
value = fl2fi(value, fixes[name])
elif isinstance(value, str):
value = tobytes(value)
elements.append(value)
data = struct.pack(*(formatstring,) + tuple(elements))
return data
def unpack(fmt, data, obj=None):
    """Unpack binary ``data`` according to the sstruct format string ``fmt``.

    Args:
        fmt: sstruct format description.
        data: bytes (or bytes-convertible) of exactly calcsize(fmt) length.
        obj: optional target; a dict gets keys assigned, any other object
            gets attributes assigned via its ``__dict__``. Defaults to a
            new dict.

    Returns:
        The populated ``obj`` (the newly created dict if none was passed).
    """
    if obj is None:
        obj = {}
    data = tobytes(data)
    formatstring, names, fixes = getformat(fmt)
    # Write into the dict itself, or into the object's attribute dict.
    d = obj if isinstance(obj, dict) else obj.__dict__
    elements = struct.unpack(formatstring, data)
    # zip() pairs each field name with its unpacked value — idiomatic
    # replacement for indexing both lists via range(len(names)).
    for name, value in zip(names, elements):
        if name in fixes:
            # fixed point conversion
            value = fi2fl(value, fixes[name])
        elif isinstance(value, bytes):
            try:
                value = tostr(value)
            except UnicodeDecodeError:
                # leave undecodable byte strings as bytes
                pass
        d[name] = value
    return obj
if obj is None:
obj = {}
data = tobytes(data)
formatstring, names, fixes = getformat(fmt)
if isinstance(obj, dict):
d = obj
else:
d = obj.__dict__
elements = struct.unpack(formatstring, data)
for i in range(len(names)):
name = names[i]
value = elements[i]
if name in fixes:
# fixed point conversion
value = fi2fl(value, fixes[name])
elif isinstance(value, bytes):
try:
value = tostr(value)
except UnicodeDecodeError:
pass
d[name] = value
return obj
def unpack2(fmt, data, obj=None):
length = calcsize(fmt)
return unpack(fmt, data[:length], obj), data[length:]
length = calcsize(fmt)
return unpack(fmt, data[:length], obj), data[length:]
def calcsize(fmt):
    """Return the total size in bytes of the sstruct format string ``fmt``."""
    formatstring, names, fixes = getformat(fmt)
    return struct.calcsize(formatstring)
formatstring, names, fixes = getformat(fmt)
return struct.calcsize(formatstring)
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
r"\s*" # whitespace
r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
r"\s*:\s*" # whitespace : whitespace
r"([xcbB?hHiIlLqQfd]|" # formatchar...
r"[0-9]+[ps]|" # ...formatchar...
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
r"\s*" # whitespace
r"(#.*)?$" # [comment] + end of string
)
r"\s*" # whitespace
r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
r"\s*:\s*" # whitespace : whitespace
r"([xcbB?hHiIlLqQfd]|" # formatchar...
r"[0-9]+[ps]|" # ...formatchar...
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
r"\s*" # whitespace
r"(#.*)?$" # [comment] + end of string
)
# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
@ -125,54 +129,53 @@ _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")
_fixedpointmappings = {
8: "b",
16: "h",
32: "l"}
_fixedpointmappings = {8: "b", 16: "h", 32: "l"}
_formatcache = {}
def getformat(fmt, keep_pad_byte=False):
    """Parse (and memoize) an sstruct format string.

    Args:
        fmt: the format description, one "name:formatchar" entry per
            line/semicolon-separated field.
        keep_pad_byte: if true, pad ('x') fields keep their names in the
            returned name list (used by pack()).

    Returns:
        (formatstring, names, fixes): the equivalent struct format string,
        the field names in order, and a dict mapping fixed-point field
        names to their number of fractional bits.

    Raises:
        Error: on any syntax problem in ``fmt``.
    """
    fmt = tostr(fmt, encoding="ascii")
    try:
        # parsed formats are cached; hit means we are done
        formatstring, names, fixes = _formatcache[fmt]
    except KeyError:
        lines = re.split("[\n;]", fmt)
        formatstring = ""
        names = []
        fixes = {}
        for line in lines:
            if _emptyRE.match(line):
                # blank line or pure comment
                continue
            m = _extraRE.match(line)
            if m:
                formatchar = m.group(1)
                if formatchar != 'x' and formatstring:
                    # byte-order/alignment chars must precede all fields
                    raise Error("a special fmt char must be first")
            else:
                m = _elementRE.match(line)
                if not m:
                    raise Error("syntax error in fmt: '%s'" % line)
                name = m.group(1)
                formatchar = m.group(2)
                if keep_pad_byte or formatchar != "x":
                    names.append(name)
                if m.group(3):
                    # fixed point
                    before = int(m.group(3))
                    after = int(m.group(4))
                    bits = before + after
                    if bits not in [8, 16, 32]:
                        raise Error("fixed point must be 8, 16 or 32 bits long")
                    formatchar = _fixedpointmappings[bits]
                    assert m.group(5) == "F"
                    fixes[name] = after
            formatstring = formatstring + formatchar
        _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes
fmt = tostr(fmt, encoding="ascii")
try:
formatstring, names, fixes = _formatcache[fmt]
except KeyError:
lines = re.split("[\n;]", fmt)
formatstring = ""
names = []
fixes = {}
for line in lines:
if _emptyRE.match(line):
continue
m = _extraRE.match(line)
if m:
formatchar = m.group(1)
if formatchar != "x" and formatstring:
raise Error("a special fmt char must be first")
else:
m = _elementRE.match(line)
if not m:
raise Error("syntax error in fmt: '%s'" % line)
name = m.group(1)
formatchar = m.group(2)
if keep_pad_byte or formatchar != "x":
names.append(name)
if m.group(3):
# fixed point
before = int(m.group(3))
after = int(m.group(4))
bits = before + after
if bits not in [8, 16, 32]:
raise Error("fixed point must be 8, 16 or 32 bits long")
formatchar = _fixedpointmappings[bits]
assert m.group(5) == "F"
fixes[name] = after
formatstring = formatstring + formatchar
_formatcache[fmt] = formatstring, names, fixes
return formatstring, names, fixes
def _test():
fmt = """
fmt = """
# comments are allowed
> # big endian (see documentation for struct)
# empty lines are allowed:
@ -188,29 +191,30 @@ def _test():
apad: x
"""
print('size:', calcsize(fmt))
print("size:", calcsize(fmt))
class foo(object):
pass
class foo(object):
pass
i = foo()
i = foo()
i.ashort = 0x7fff
i.along = 0x7fffffff
i.abyte = 0x7f
i.achar = "a"
i.astr = "12345"
i.afloat = 0.5
i.adouble = 0.5
i.afixed = 1.5
i.abool = True
i.ashort = 0x7FFF
i.along = 0x7FFFFFFF
i.abyte = 0x7F
i.achar = "a"
i.astr = "12345"
i.afloat = 0.5
i.adouble = 0.5
i.afixed = 1.5
i.abool = True
data = pack(fmt, i)
print("data:", repr(data))
print(unpack(fmt, data))
i2 = foo()
unpack(fmt, data, i2)
print(vars(i2))
data = pack(fmt, i)
print('data:', repr(data))
print(unpack(fmt, data))
i2 = foo()
unpack(fmt, data, i2)
print(vars(i2))
if __name__ == "__main__":
_test()
_test()

View File

@ -4,98 +4,104 @@ from itertools import count
import sympy as sp
import sys
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
t, x, y = sp.symbols('t x y', real=True)
c = sp.symbols('c', real=False) # Complex representation instead of x/y
t, x, y = sp.symbols("t x y", real=True)
c = sp.symbols("c", real=False) # Complex representation instead of x/y
X = tuple(sp.symbols('x:%d'%(n+1), real=True))
Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
C = tuple(sp.symbols('c:%d'%(n+1), real=False))
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
# Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)]
for i in range(1, n+1):
last = BinomialCoefficient[-1]
this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,)
BinomialCoefficient.append(this)
for i in range(1, n + 1):
last = BinomialCoefficient[-1]
this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this
BernsteinPolynomial = tuple(
tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs))
for n,coeffs in enumerate(BinomialCoefficient))
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
for n, coeffs in enumerate(BinomialCoefficient)
)
BezierCurve = tuple(
tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
for j in range(2))
for n,bernsteins in enumerate(BernsteinPolynomial))
tuple(
sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
for j in range(2)
)
for n, bernsteins in enumerate(BernsteinPolynomial)
)
BezierCurveC = tuple(
sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins))
for n,bernsteins in enumerate(BernsteinPolynomial))
sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
for n, bernsteins in enumerate(BernsteinPolynomial)
)
def green(f, curveXY):
f = -sp.integrate(sp.sympify(f), y)
f = f.subs({x:curveXY[0], y:curveXY[1]})
f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
return f
f = -sp.integrate(sp.sympify(f), y)
f = f.subs({x: curveXY[0], y: curveXY[1]})
f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
return f
class _BezierFuncsLazy(dict):
def __init__(self, symfunc):
self._symfunc = symfunc
self._bezfuncs = {}
def __init__(self, symfunc):
self._symfunc = symfunc
self._bezfuncs = {}
def __missing__(self, i):
args = ["p%d" % d for d in range(i + 1)]
f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
return sp.lambdify(args, f)
def __missing__(self, i):
args = ['p%d'%d for d in range(i+1)]
f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize
return sp.lambdify(args, f)
class GreenPen(BasePen):
_BezierFuncs = {}
_BezierFuncs = {}
@classmethod
def _getGreenBezierFuncs(celf, func):
funcstr = str(func)
if not funcstr in celf._BezierFuncs:
celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
return celf._BezierFuncs[funcstr]
@classmethod
def _getGreenBezierFuncs(celf, func):
funcstr = str(func)
if not funcstr in celf._BezierFuncs:
celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
return celf._BezierFuncs[funcstr]
def __init__(self, func, glyphset=None):
BasePen.__init__(self, glyphset)
self._funcs = self._getGreenBezierFuncs(func)
self.value = 0
def __init__(self, func, glyphset=None):
BasePen.__init__(self, glyphset)
self._funcs = self._getGreenBezierFuncs(func)
self.value = 0
def _moveTo(self, p0):
self.__startPoint = p0
def _moveTo(self, p0):
self.__startPoint = p0
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise NotImplementedError
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise NotImplementedError
def _lineTo(self, p1):
p0 = self._getCurrentPoint()
self.value += self._funcs[1](p0, p1)
def _lineTo(self, p1):
p0 = self._getCurrentPoint()
self.value += self._funcs[1](p0, p1)
def _qCurveToOne(self, p1, p2):
p0 = self._getCurrentPoint()
self.value += self._funcs[2](p0, p1, p2)
def _qCurveToOne(self, p1, p2):
p0 = self._getCurrentPoint()
self.value += self._funcs[2](p0, p1, p2)
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens.
# Do not use this in real code.
@ -103,18 +109,18 @@ class GreenPen(BasePen):
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x*x)
MomentYYPen = partial(GreenPen, func=y*y)
MomentXYPen = partial(GreenPen, func=x*y)
MomentXXPen = partial(GreenPen, func=x * x)
MomentYYPen = partial(GreenPen, func=y * y)
MomentXYPen = partial(GreenPen, func=x * y)
def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
if docstring is not None:
print('"""%s"""' % docstring)
if docstring is not None:
print('"""%s"""' % docstring)
print(
'''from fontTools.pens.basePen import BasePen, OpenContourError
print(
"""from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
except ImportError:
@ -135,10 +141,14 @@ class %s(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
'''% (penName, penName), file=file)
for name,f in funcs:
print(' self.%s = 0' % name, file=file)
print('''
"""
% (penName, penName),
file=file,
)
for name, f in funcs:
print(" self.%s = 0" % name, file=file)
print(
"""
def _moveTo(self, p0):
self.__startPoint = p0
@ -154,32 +164,40 @@ class %s(BasePen):
raise OpenContourError(
"Green theorem is not defined on open contours."
)
''', end='', file=file)
""",
end="",
file=file,
)
for n in (1, 2, 3):
for n in (1, 2, 3):
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
greens = [green(f, BezierCurve[n]) for name, f in funcs]
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
defs, exprs = sp.cse(
greens,
optimizations="basic",
symbols=(sp.Symbol("r%d" % i) for i in count()),
)
subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)}
greens = [green(f, BezierCurve[n]) for name,f in funcs]
greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
defs, exprs = sp.cse(greens,
optimizations='basic',
symbols=(sp.Symbol('r%d'%i) for i in count()))
print()
for name,value in defs:
print(' @cython.locals(%s=cython.double)' % name, file=file)
if n == 1:
print('''\
print()
for name, value in defs:
print(" @cython.locals(%s=cython.double)" % name, file=file)
if n == 1:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
''', file=file)
elif n == 2:
print('''\
""",
file=file,
)
elif n == 2:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@ -187,9 +205,12 @@ class %s(BasePen):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
''', file=file)
elif n == 3:
print('''\
""",
file=file,
)
elif n == 3:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@ -199,29 +220,35 @@ class %s(BasePen):
x1,y1 = p1
x2,y2 = p2
x3,y3 = p3
''', file=file)
for name,value in defs:
print(' %s = %s' % (name, value), file=file)
""",
file=file,
)
for name, value in defs:
print(" %s = %s" % (name, value), file=file)
print(file=file)
for name,value in zip([f[0] for f in funcs], exprs):
print(' self.%s += %s' % (name, value), file=file)
print(file=file)
for name, value in zip([f[0] for f in funcs], exprs):
print(" self.%s += %s" % (name, value), file=file)
print('''
print(
"""
if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('%s', ['''%penName, file=file)
for name,f in funcs:
print(" ('%s', %s)," % (name, str(f)), file=file)
print(' ])', file=file)
printGreenPen('%s', ["""
% penName,
file=file,
)
for name, f in funcs:
print(" ('%s', %s)," % (name, str(f)), file=file)
print(" ])", file=file)
if __name__ == '__main__':
pen = AreaPen()
pen.moveTo((100,100))
pen.lineTo((100,200))
pen.lineTo((200,200))
pen.curveTo((200,250),(300,300),(250,350))
pen.lineTo((200,100))
pen.closePath()
print(pen.value)
if __name__ == "__main__":
pen = AreaPen()
pen.moveTo((100, 100))
pen.lineTo((100, 200))
pen.lineTo((200, 200))
pen.curveTo((200, 250), (300, 300), (250, 350))
pen.lineTo((200, 100))
pen.closePath()
print(pen.value)

View File

@ -29,12 +29,14 @@ def parseXML(xmlSnippet):
if isinstance(xmlSnippet, bytes):
xml += xmlSnippet
elif isinstance(xmlSnippet, str):
xml += tobytes(xmlSnippet, 'utf-8')
xml += tobytes(xmlSnippet, "utf-8")
elif isinstance(xmlSnippet, Iterable):
xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
else:
raise TypeError("expected string or sequence of strings; found %r"
% type(xmlSnippet).__name__)
raise TypeError(
"expected string or sequence of strings; found %r"
% type(xmlSnippet).__name__
)
xml += b"</root>"
reader.parser.Parse(xml, 0)
return reader.root[2]
@ -76,6 +78,7 @@ class FakeFont:
return self.glyphOrder_[glyphID]
else:
return "glyph%.5d" % glyphID
def getGlyphNameMany(self, lst):
return [self.getGlyphName(gid) for gid in lst]
@ -92,6 +95,7 @@ class FakeFont:
class TestXMLReader_(object):
def __init__(self):
from xml.parsers.expat import ParserCreate
self.parser = ParserCreate()
self.parser.StartElementHandler = self.startElement_
self.parser.EndElementHandler = self.endElement_
@ -114,7 +118,7 @@ class TestXMLReader_(object):
self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr='\n'):
def makeXMLWriter(newlinestr="\n"):
# don't write OS-specific new lines
writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
# erase XML declaration
@ -166,7 +170,7 @@ class MockFont(object):
to its glyphOrder."""
def __init__(self):
self._glyphOrder = ['.notdef']
self._glyphOrder = [".notdef"]
class AllocatingDict(dict):
def __missing__(reverseDict, key):
@ -174,7 +178,8 @@ class MockFont(object):
gid = len(reverseDict)
reverseDict[key] = gid
return gid
self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False
def getGlyphID(self, glyph):
@ -192,7 +197,6 @@ class MockFont(object):
class TestCase(_TestCase):
def __init__(self, methodName):
_TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@ -202,7 +206,6 @@ class TestCase(_TestCase):
class DataFilesHandler(TestCase):
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0

View File

@ -33,90 +33,90 @@ class Tag(str):
def readHex(content):
"""Convert a list of hex strings to binary data."""
return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
"""Convert a list of hex strings to binary data."""
return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
def deHexStr(hexdata):
"""Convert a hex string to binary data."""
hexdata = strjoin(hexdata.split())
if len(hexdata) % 2:
hexdata = hexdata + "0"
data = []
for i in range(0, len(hexdata), 2):
data.append(bytechr(int(hexdata[i:i+2], 16)))
return bytesjoin(data)
"""Convert a hex string to binary data."""
hexdata = strjoin(hexdata.split())
if len(hexdata) % 2:
hexdata = hexdata + "0"
data = []
for i in range(0, len(hexdata), 2):
data.append(bytechr(int(hexdata[i : i + 2], 16)))
return bytesjoin(data)
def hexStr(data):
"""Convert binary data to a hex string."""
h = string.hexdigits
r = ''
for c in data:
i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
"""Convert binary data to a hex string."""
h = string.hexdigits
r = ""
for c in data:
i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
def num2binary(l, bits=32):
items = []
binary = ""
for i in range(bits):
if l & 0x1:
binary = "1" + binary
else:
binary = "0" + binary
l = l >> 1
if not ((i+1) % 8):
items.append(binary)
binary = ""
if binary:
items.append(binary)
items.reverse()
assert l in (0, -1), "number doesn't fit in number of bits"
return ' '.join(items)
items = []
binary = ""
for i in range(bits):
if l & 0x1:
binary = "1" + binary
else:
binary = "0" + binary
l = l >> 1
if not ((i + 1) % 8):
items.append(binary)
binary = ""
if binary:
items.append(binary)
items.reverse()
assert l in (0, -1), "number doesn't fit in number of bits"
return " ".join(items)
def binary2num(bin):
bin = strjoin(bin.split())
l = 0
for digit in bin:
l = l << 1
if digit != "0":
l = l | 0x1
return l
bin = strjoin(bin.split())
l = 0
for digit in bin:
l = l << 1
if digit != "0":
l = l | 0x1
return l
def caselessSort(alist):
"""Return a sorted copy of a list. If there are only strings
in the list, it will not consider case.
"""
"""Return a sorted copy of a list. If there are only strings
in the list, it will not consider case.
"""
try:
return sorted(alist, key=lambda a: (a.lower(), a))
except TypeError:
return sorted(alist)
try:
return sorted(alist, key=lambda a: (a.lower(), a))
except TypeError:
return sorted(alist)
def pad(data, size):
r""" Pad byte string 'data' with null bytes until its length is a
multiple of 'size'.
r"""Pad byte string 'data' with null bytes until its length is a
multiple of 'size'.
>>> len(pad(b'abcd', 4))
4
>>> len(pad(b'abcde', 2))
6
>>> len(pad(b'abcde', 4))
8
>>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
True
"""
data = tobytes(data)
if size > 1:
remainder = len(data) % size
if remainder:
data += b"\0" * (size - remainder)
return data
>>> len(pad(b'abcd', 4))
4
>>> len(pad(b'abcde', 2))
6
>>> len(pad(b'abcde', 4))
8
>>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
True
"""
data = tobytes(data)
if size > 1:
remainder = len(data) % size
if remainder:
data += b"\0" * (size - remainder)
return data
def tostr(s, encoding="ascii", errors="strict"):
@ -150,5 +150,6 @@ def bytesjoin(iterable, joiner=b""):
if __name__ == "__main__":
import doctest, sys
sys.exit(doctest.testmod().failed)
import doctest, sys
sys.exit(doctest.testmod().failed)

View File

@ -10,59 +10,79 @@ import calendar
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHNAMES = [
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
def asctime(t=None):
"""
Convert a tuple or struct_time representing a time as returned by gmtime()
or localtime() to a 24-character string of the following form:
"""
Convert a tuple or struct_time representing a time as returned by gmtime()
or localtime() to a 24-character string of the following form:
>>> asctime(time.gmtime(0))
'Thu Jan 1 00:00:00 1970'
>>> asctime(time.gmtime(0))
'Thu Jan 1 00:00:00 1970'
If t is not provided, the current time as returned by localtime() is used.
Locale information is not used by asctime().
If t is not provided, the current time as returned by localtime() is used.
Locale information is not used by asctime().
This is meant to normalise the output of the built-in time.asctime() across
different platforms and Python versions.
In Python 3.x, the day of the month is right-justified, whereas on Windows
Python 2.7 it is padded with zeros.
This is meant to normalise the output of the built-in time.asctime() across
different platforms and Python versions.
In Python 3.x, the day of the month is right-justified, whereas on Windows
Python 2.7 it is padded with zeros.
See https://github.com/fonttools/fonttools/issues/455
"""
if t is None:
t = time.localtime()
s = "%s %s %2s %s" % (
DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
time.strftime("%H:%M:%S %Y", t))
return s
See https://github.com/fonttools/fonttools/issues/455
"""
if t is None:
t = time.localtime()
s = "%s %s %2s %s" % (
DAYNAMES[t.tm_wday],
MONTHNAMES[t.tm_mon],
t.tm_mday,
time.strftime("%H:%M:%S %Y", t),
)
return s
def timestampToString(value):
return asctime(time.gmtime(max(0, value + epoch_diff)))
return asctime(time.gmtime(max(0, value + epoch_diff)))
def timestampFromString(value):
wkday, mnth = value[:7].split()
t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
wkday_idx = DAYNAMES.index(wkday)
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
return int(t.timestamp()) - epoch_diff
wkday, mnth = value[:7].split()
t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
wkday_idx = DAYNAMES.index(wkday)
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
return int(t.timestamp()) - epoch_diff
def timestampNow():
# https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
if source_date_epoch is not None:
return int(source_date_epoch) - epoch_diff
return int(time.time() - epoch_diff)
# https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
if source_date_epoch is not None:
return int(source_date_epoch) - epoch_diff
return int(time.time() - epoch_diff)
def timestampSinceEpoch(value):
return int(value - epoch_diff)
return int(value - epoch_diff)
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
import sys
import doctest
sys.exit(doctest.testmod().failed)

View File

@ -61,338 +61,345 @@ _MINUS_ONE_EPSILON = -1 + _EPSILON
def _normSinCos(v):
if abs(v) < _EPSILON:
v = 0
elif v > _ONE_EPSILON:
v = 1
elif v < _MINUS_ONE_EPSILON:
v = -1
return v
if abs(v) < _EPSILON:
v = 0
elif v > _ONE_EPSILON:
v = 1
elif v < _MINUS_ONE_EPSILON:
v = -1
return v
class Transform(NamedTuple):
"""2x2 transformation matrix plus offset, a.k.a. Affine transform.
Transform instances are immutable: all transforming methods, eg.
rotate(), return a new Transform instance.
"""2x2 transformation matrix plus offset, a.k.a. Affine transform.
Transform instances are immutable: all transforming methods, eg.
rotate(), return a new Transform instance.
:Example:
:Example:
>>> t = Transform()
>>> t
<Transform [1 0 0 1 0 0]>
>>> t.scale(2)
<Transform [2 0 0 2 0 0]>
>>> t.scale(2.5, 5.5)
<Transform [2.5 0 0 5.5 0 0]>
>>>
>>> t.scale(2, 3).transformPoint((100, 100))
(200, 300)
>>> t = Transform()
>>> t
<Transform [1 0 0 1 0 0]>
>>> t.scale(2)
<Transform [2 0 0 2 0 0]>
>>> t.scale(2.5, 5.5)
<Transform [2.5 0 0 5.5 0 0]>
>>>
>>> t.scale(2, 3).transformPoint((100, 100))
(200, 300)
Transform's constructor takes six arguments, all of which are
optional, and can be used as keyword arguments::
Transform's constructor takes six arguments, all of which are
optional, and can be used as keyword arguments::
>>> Transform(12)
<Transform [12 0 0 1 0 0]>
>>> Transform(dx=12)
<Transform [1 0 0 1 12 0]>
>>> Transform(yx=12)
<Transform [1 0 12 1 0 0]>
>>> Transform(12)
<Transform [12 0 0 1 0 0]>
>>> Transform(dx=12)
<Transform [1 0 0 1 12 0]>
>>> Transform(yx=12)
<Transform [1 0 12 1 0 0]>
Transform instances also behave like sequences of length 6::
Transform instances also behave like sequences of length 6::
>>> len(Identity)
6
>>> list(Identity)
[1, 0, 0, 1, 0, 0]
>>> tuple(Identity)
(1, 0, 0, 1, 0, 0)
>>> len(Identity)
6
>>> list(Identity)
[1, 0, 0, 1, 0, 0]
>>> tuple(Identity)
(1, 0, 0, 1, 0, 0)
Transform instances are comparable::
Transform instances are comparable::
>>> t1 = Identity.scale(2, 3).translate(4, 6)
>>> t2 = Identity.translate(8, 18).scale(2, 3)
>>> t1 == t2
1
>>> t1 = Identity.scale(2, 3).translate(4, 6)
>>> t2 = Identity.translate(8, 18).scale(2, 3)
>>> t1 == t2
1
But beware of floating point rounding errors::
But beware of floating point rounding errors::
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t1 == t2
0
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t1 == t2
0
Transform instances are hashable, meaning you can use them as
keys in dictionaries::
Transform instances are hashable, meaning you can use them as
keys in dictionaries::
>>> d = {Scale(12, 13): None}
>>> d
{<Transform [12 0 0 13 0 0]>: None}
>>> d = {Scale(12, 13): None}
>>> d
{<Transform [12 0 0 13 0 0]>: None}
But again, beware of floating point rounding errors::
But again, beware of floating point rounding errors::
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> d = {t1: None}
>>> d
{<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
>>> d[t2]
Traceback (most recent call last):
File "<stdin>", line 1, in ?
KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
"""
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]>
>>> d = {t1: None}
>>> d
{<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
>>> d[t2]
Traceback (most recent call last):
File "<stdin>", line 1, in ?
KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
"""
xx: float = 1
xy: float = 0
yx: float = 0
yy: float = 1
dx: float = 0
dy: float = 0
xx: float = 1
xy: float = 0
yx: float = 0
yy: float = 1
dx: float = 0
dy: float = 0
def transformPoint(self, p):
"""Transform a point.
def transformPoint(self, p):
"""Transform a point.
:Example:
:Example:
>>> t = Transform()
>>> t = t.scale(2.5, 5.5)
>>> t.transformPoint((100, 100))
(250.0, 550.0)
"""
(x, y) = p
xx, xy, yx, yy, dx, dy = self
return (xx*x + yx*y + dx, xy*x + yy*y + dy)
>>> t = Transform()
>>> t = t.scale(2.5, 5.5)
>>> t.transformPoint((100, 100))
(250.0, 550.0)
"""
(x, y) = p
xx, xy, yx, yy, dx, dy = self
return (xx * x + yx * y + dx, xy * x + yy * y + dy)
def transformPoints(self, points):
"""Transform a list of points.
def transformPoints(self, points):
"""Transform a list of points.
:Example:
:Example:
>>> t = Scale(2, 3)
>>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
[(0, 0), (0, 300), (200, 300), (200, 0)]
>>>
"""
xx, xy, yx, yy, dx, dy = self
return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points]
>>> t = Scale(2, 3)
>>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
[(0, 0), (0, 300), (200, 300), (200, 0)]
>>>
"""
xx, xy, yx, yy, dx, dy = self
return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points]
def transformVector(self, v):
"""Transform an (dx, dy) vector, treating translation as zero.
def transformVector(self, v):
"""Transform an (dx, dy) vector, treating translation as zero.
:Example:
:Example:
>>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVector((3, -4))
(6, -8)
>>>
"""
(dx, dy) = v
xx, xy, yx, yy = self[:4]
return (xx*dx + yx*dy, xy*dx + yy*dy)
>>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVector((3, -4))
(6, -8)
>>>
"""
(dx, dy) = v
xx, xy, yx, yy = self[:4]
return (xx * dx + yx * dy, xy * dx + yy * dy)
def transformVectors(self, vectors):
"""Transform a list of (dx, dy) vector, treating translation as zero.
def transformVectors(self, vectors):
"""Transform a list of (dx, dy) vector, treating translation as zero.
:Example:
>>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVectors([(3, -4), (5, -6)])
[(6, -8), (10, -12)]
>>>
"""
xx, xy, yx, yy = self[:4]
return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors]
:Example:
>>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVectors([(3, -4), (5, -6)])
[(6, -8), (10, -12)]
>>>
"""
xx, xy, yx, yy = self[:4]
return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors]
def translate(self, x=0, y=0):
"""Return a new transformation, translated (offset) by x, y.
def translate(self, x=0, y=0):
"""Return a new transformation, translated (offset) by x, y.
:Example:
>>> t = Transform()
>>> t.translate(20, 30)
<Transform [1 0 0 1 20 30]>
>>>
"""
return self.transform((1, 0, 0, 1, x, y))
:Example:
>>> t = Transform()
>>> t.translate(20, 30)
<Transform [1 0 0 1 20 30]>
>>>
"""
return self.transform((1, 0, 0, 1, x, y))
def scale(self, x=1, y=None):
"""Return a new transformation, scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
def scale(self, x=1, y=None):
"""Return a new transformation, scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
:Example:
>>> t = Transform()
>>> t.scale(5)
<Transform [5 0 0 5 0 0]>
>>> t.scale(5, 6)
<Transform [5 0 0 6 0 0]>
>>>
"""
if y is None:
y = x
return self.transform((x, 0, 0, y, 0, 0))
:Example:
>>> t = Transform()
>>> t.scale(5)
<Transform [5 0 0 5 0 0]>
>>> t.scale(5, 6)
<Transform [5 0 0 6 0 0]>
>>>
"""
if y is None:
y = x
return self.transform((x, 0, 0, y, 0, 0))
def rotate(self, angle):
"""Return a new transformation, rotated by 'angle' (radians).
def rotate(self, angle):
"""Return a new transformation, rotated by 'angle' (radians).
:Example:
>>> import math
>>> t = Transform()
>>> t.rotate(math.pi / 2)
<Transform [0 1 -1 0 0 0]>
>>>
"""
import math
c = _normSinCos(math.cos(angle))
s = _normSinCos(math.sin(angle))
return self.transform((c, s, -s, c, 0, 0))
:Example:
>>> import math
>>> t = Transform()
>>> t.rotate(math.pi / 2)
<Transform [0 1 -1 0 0 0]>
>>>
"""
import math
def skew(self, x=0, y=0):
"""Return a new transformation, skewed by x and y.
c = _normSinCos(math.cos(angle))
s = _normSinCos(math.sin(angle))
return self.transform((c, s, -s, c, 0, 0))
:Example:
>>> import math
>>> t = Transform()
>>> t.skew(math.pi / 4)
<Transform [1 0 1 1 0 0]>
>>>
"""
import math
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
def skew(self, x=0, y=0):
"""Return a new transformation, skewed by x and y.
def transform(self, other):
"""Return a new transformation, transformed by another
transformation.
:Example:
>>> import math
>>> t = Transform()
>>> t.skew(math.pi / 4)
<Transform [1 0 1 1 0 0]>
>>>
"""
import math
:Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.transform((4, 3, 2, 1, 5, 6))
<Transform [8 9 4 3 11 24]>
>>>
"""
xx1, xy1, yx1, yy1, dx1, dy1 = other
xx2, xy2, yx2, yy2, dx2, dy2 = self
return self.__class__(
xx1*xx2 + xy1*yx2,
xx1*xy2 + xy1*yy2,
yx1*xx2 + yy1*yx2,
yx1*xy2 + yy1*yy2,
xx2*dx1 + yx2*dy1 + dx2,
xy2*dx1 + yy2*dy1 + dy2)
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
def reverseTransform(self, other):
"""Return a new transformation, which is the other transformation
transformed by self. self.reverseTransform(other) is equivalent to
other.transform(self).
def transform(self, other):
"""Return a new transformation, transformed by another
transformation.
:Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.reverseTransform((4, 3, 2, 1, 5, 6))
<Transform [8 6 6 3 21 15]>
>>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
<Transform [8 6 6 3 21 15]>
>>>
"""
xx1, xy1, yx1, yy1, dx1, dy1 = self
xx2, xy2, yx2, yy2, dx2, dy2 = other
return self.__class__(
xx1*xx2 + xy1*yx2,
xx1*xy2 + xy1*yy2,
yx1*xx2 + yy1*yx2,
yx1*xy2 + yy1*yy2,
xx2*dx1 + yx2*dy1 + dx2,
xy2*dx1 + yy2*dy1 + dy2)
:Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.transform((4, 3, 2, 1, 5, 6))
<Transform [8 9 4 3 11 24]>
>>>
"""
xx1, xy1, yx1, yy1, dx1, dy1 = other
xx2, xy2, yx2, yy2, dx2, dy2 = self
return self.__class__(
xx1 * xx2 + xy1 * yx2,
xx1 * xy2 + xy1 * yy2,
yx1 * xx2 + yy1 * yx2,
yx1 * xy2 + yy1 * yy2,
xx2 * dx1 + yx2 * dy1 + dx2,
xy2 * dx1 + yy2 * dy1 + dy2,
)
def inverse(self):
"""Return the inverse transformation.
def reverseTransform(self, other):
"""Return a new transformation, which is the other transformation
transformed by self. self.reverseTransform(other) is equivalent to
other.transform(self).
:Example:
>>> t = Identity.translate(2, 3).scale(4, 5)
>>> t.transformPoint((10, 20))
(42, 103)
>>> it = t.inverse()
>>> it.transformPoint((42, 103))
(10.0, 20.0)
>>>
"""
if self == Identity:
return self
xx, xy, yx, yy, dx, dy = self
det = xx*yy - yx*xy
xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det
dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy
return self.__class__(xx, xy, yx, yy, dx, dy)
:Example:
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.reverseTransform((4, 3, 2, 1, 5, 6))
<Transform [8 6 6 3 21 15]>
>>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
<Transform [8 6 6 3 21 15]>
>>>
"""
xx1, xy1, yx1, yy1, dx1, dy1 = self
xx2, xy2, yx2, yy2, dx2, dy2 = other
return self.__class__(
xx1 * xx2 + xy1 * yx2,
xx1 * xy2 + xy1 * yy2,
yx1 * xx2 + yy1 * yx2,
yx1 * xy2 + yy1 * yy2,
xx2 * dx1 + yx2 * dy1 + dx2,
xy2 * dx1 + yy2 * dy1 + dy2,
)
def toPS(self):
"""Return a PostScript representation
def inverse(self):
"""Return the inverse transformation.
:Example:
:Example:
>>> t = Identity.translate(2, 3).scale(4, 5)
>>> t.transformPoint((10, 20))
(42, 103)
>>> it = t.inverse()
>>> it.transformPoint((42, 103))
(10.0, 20.0)
>>>
"""
if self == Identity:
return self
xx, xy, yx, yy, dx, dy = self
det = xx * yy - yx * xy
xx, xy, yx, yy = yy / det, -xy / det, -yx / det, xx / det
dx, dy = -xx * dx - yx * dy, -xy * dx - yy * dy
return self.__class__(xx, xy, yx, yy, dx, dy)
>>> t = Identity.scale(2, 3).translate(4, 5)
>>> t.toPS()
'[2 0 0 3 8 15]'
>>>
"""
return "[%s %s %s %s %s %s]" % self
def toPS(self):
"""Return a PostScript representation
def __bool__(self):
"""Returns True if transform is not identity, False otherwise.
:Example:
:Example:
>>> t = Identity.scale(2, 3).translate(4, 5)
>>> t.toPS()
'[2 0 0 3 8 15]'
>>>
"""
return "[%s %s %s %s %s %s]" % self
>>> bool(Identity)
False
>>> bool(Transform())
False
>>> bool(Scale(1.))
False
>>> bool(Scale(2))
True
>>> bool(Offset())
False
>>> bool(Offset(0))
False
>>> bool(Offset(2))
True
"""
return self != Identity
def __bool__(self):
"""Returns True if transform is not identity, False otherwise.
def __repr__(self):
return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
:Example:
>>> bool(Identity)
False
>>> bool(Transform())
False
>>> bool(Scale(1.))
False
>>> bool(Scale(2))
True
>>> bool(Offset())
False
>>> bool(Offset(0))
False
>>> bool(Offset(2))
True
"""
return self != Identity
def __repr__(self):
return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
Identity = Transform()
def Offset(x=0, y=0):
"""Return the identity transformation offset by x, y.
:Example:
>>> Offset(2, 3)
<Transform [1 0 0 1 2 3]>
>>>
"""
return Transform(1, 0, 0, 1, x, y)
def Offset(x=0, y=0):
"""Return the identity transformation offset by x, y.
:Example:
>>> Offset(2, 3)
<Transform [1 0 0 1 2 3]>
>>>
"""
return Transform(1, 0, 0, 1, x, y)
def Scale(x, y=None):
"""Return the identity transformation scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
"""Return the identity transformation scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well.
:Example:
>>> Scale(2, 3)
<Transform [2 0 0 3 0 0]>
>>>
"""
if y is None:
y = x
return Transform(x, 0, 0, y, 0, 0)
:Example:
>>> Scale(2, 3)
<Transform [2 0 0 3 0 0]>
>>>
"""
if y is None:
y = x
return Transform(x, 0, 0, y, 0, 0)
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
import sys
import doctest
sys.exit(doctest.testmod().failed)

View File

@ -8,164 +8,169 @@ import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception): pass
class TTXParseError(Exception):
pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(
self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
):
if fileOrPath == "-":
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
self._closeStream = True
else:
# assume readable file object
self.file = fileOrPath
self._closeStream = False
self.ttFont = ttFont
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
if fileOrPath == '-':
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
self._closeStream = True
else:
# assume readable file object
self.file = fileOrPath
self._closeStream = False
self.ttFont = ttFont
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
self.contentStack = []
self.contentOnly = contentOnly
self.stackSize = 0
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
self.contentStack = []
self.contentOnly = contentOnly
self.stackSize = 0
def read(self, rootless=False):
if rootless:
self.stackSize += 1
if self.progress:
self.file.seek(0, 2)
fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0)
self._parseFile(self.file)
if self._closeStream:
self.close()
if rootless:
self.stackSize -= 1
def read(self, rootless=False):
if rootless:
self.stackSize += 1
if self.progress:
self.file.seek(0, 2)
fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0)
self._parseFile(self.file)
if self._closeStream:
self.close()
if rootless:
self.stackSize -= 1
def close(self):
self.file.close()
def close(self):
self.file.close()
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
def _startElementHandler(self, name, attrs):
if self.stackSize == 1 and self.contentOnly:
# We already know the table we're parsing, skip
# parsing the table tag and continue to
# stack '2' which begins parsing content
self.contentStack.append([])
self.stackSize = 2
return
stackSize = self.stackSize
self.stackSize = stackSize + 1
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
if self.ttFont.reader is None and not self.ttFont.tables:
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2 and subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
subReader.read()
self.contentStack.append([])
self.root = subReader.root
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _characterDataHandler(self, data):
if self.stackSize > 1:
self.contentStack[-1].append(data)
def _startElementHandler(self, name, attrs):
    """Expat start-tag callback; dispatch on the current stack depth.

    Depth 0 is the <ttFont> root, depth 1 a table element, depth 2 the
    table's root content element; deeper levels accumulate a nested
    (name, attrs, children) tree on contentStack.  A "src" attribute
    delegates parsing of that element to a separate TTX file.
    """
    if self.stackSize == 1 and self.contentOnly:
        # We already know the table we're parsing, skip
        # parsing the table tag and continue to
        # stack '2' which begins parsing content
        self.contentStack.append([])
        self.stackSize = 2
        return
    stackSize = self.stackSize
    self.stackSize = stackSize + 1
    subFile = attrs.get("src")
    if subFile is not None:
        if hasattr(self.file, "name"):
            # if file has a name, get its parent directory
            dirname = os.path.dirname(self.file.name)
        else:
            # else fall back to using the current working directory
            dirname = os.getcwd()
        subFile = os.path.join(dirname, subFile)
    if not stackSize:
        if name != "ttFont":
            raise TTXParseError("illegal root tag: %s" % name)
        if self.ttFont.reader is None and not self.ttFont.tables:
            sfntVersion = attrs.get("sfntVersion")
            if sfntVersion is not None:
                if len(sfntVersion) != 4:
                    # sfntVersion was dumped as an escaped string
                    # (e.g. "\x00\x01\x00\x00"); evaluate it back to 4 bytes
                    sfntVersion = safeEval('"' + sfntVersion + '"')
                self.ttFont.sfntVersion = sfntVersion
        self.contentStack.append([])
    elif stackSize == 1:
        if subFile is not None:
            # the whole table lives in an external TTX file
            subReader = XMLReader(subFile, self.ttFont, self.progress)
            subReader.read()
            self.contentStack.append([])
            return
        tag = ttLib.xmlToTag(name)
        msg = "Parsing '%s' table..." % tag
        if self.progress:
            self.progress.setLabel(msg)
        log.info(msg)
        if tag == "GlyphOrder":
            tableClass = ttLib.GlyphOrder
        elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
            # table was dumped as an error or as raw hex data
            tableClass = DefaultTable
        else:
            tableClass = ttLib.getTableClass(tag)
            if tableClass is None:
                tableClass = DefaultTable
        if tag == "loca" and tag in self.ttFont:
            # Special-case the 'loca' table as we need the
            # original if the 'glyf' table isn't recompiled.
            self.currentTable = self.ttFont[tag]
        else:
            self.currentTable = tableClass(tag)
            self.ttFont[tag] = self.currentTable
        self.contentStack.append([])
    elif stackSize == 2 and subFile is not None:
        # table content delegated to an external TTX file
        subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
        subReader.read()
        self.contentStack.append([])
        self.root = subReader.root
    elif stackSize == 2:
        self.contentStack.append([])
        self.root = (name, attrs, self.contentStack[-1])
    else:
        l = []
        self.contentStack[-1].append((name, attrs, l))
        self.contentStack.append(l)
def _endElementHandler(self, name):
self.stackSize = self.stackSize - 1
del self.contentStack[-1]
if not self.contentOnly:
if self.stackSize == 1:
self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
def _characterDataHandler(self, data):
    """Expat character-data callback: buffer text while inside a table element."""
    # Text is only collected below the table level (stack depth > 1);
    # top-level whitespace between tables is discarded.
    if self.stackSize > 1:
        self.contentStack[-1].append(data)
def _endElementHandler(self, name):
    """Expat end-tag callback: pop one level; at table depth, build the table."""
    self.stackSize = self.stackSize - 1
    del self.contentStack[-1]
    if not self.contentOnly:
        if self.stackSize == 1:
            # closed the root element; nothing further to build
            self.root = None
        elif self.stackSize == 2:
            # closed a table element: hand the accumulated content tree
            # to the table object for parsing
            name, attrs, content = self.root
            self.currentTable.fromXML(name, attrs, content, self.ttFont)
            self.root = None
class ProgressPrinter(object):
    """Console progress reporter with the same interface as a GUI progress bar.

    Only the title and label messages are printed; the numeric progress
    callbacks (``set``/``increment``) are accepted and ignored.

    Note: the original text defined every method twice (each later def
    silently shadowing the earlier one, ruff F811); the duplicates are
    removed here with no change in behavior.
    """

    def __init__(self, title, maxval=100):
        # maxval is accepted for interface compatibility; it is not used.
        print(title)

    def set(self, val, maxval=None):
        # Numeric progress is not shown on the console.
        pass

    def increment(self, val=1):
        # Numeric progress is not shown on the console.
        pass

    def setLabel(self, text):
        print(text)

View File

@ -9,186 +9,196 @@ INDENT = " "
class XMLWriter(object):
def __init__(
self,
fileOrPath,
indentwhite=INDENT,
idlefunc=None,
encoding="utf_8",
newlinestr="\n",
):
if encoding.lower().replace("-", "").replace("_", "") != "utf8":
raise Exception("Only UTF-8 encoding is supported.")
if fileOrPath == "-":
fileOrPath = sys.stdout
if not hasattr(fileOrPath, "write"):
self.filename = fileOrPath
self.file = open(fileOrPath, "wb")
self._closeStream = True
else:
self.filename = None
# assume writable file object
self.file = fileOrPath
self._closeStream = False
def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
newlinestr="\n"):
if encoding.lower().replace('-','').replace('_','') != 'utf8':
raise Exception('Only UTF-8 encoding is supported.')
if fileOrPath == '-':
fileOrPath = sys.stdout
if not hasattr(fileOrPath, "write"):
self.filename = fileOrPath
self.file = open(fileOrPath, "wb")
self._closeStream = True
else:
self.filename = None
# assume writable file object
self.file = fileOrPath
self._closeStream = False
# Figure out if writer expects bytes or unicodes
try:
# The bytes check should be first. See:
# https://github.com/fonttools/fonttools/pull/233
self.file.write(b"")
self.totype = tobytes
except TypeError:
# This better not fail.
self.file.write("")
self.totype = tostr
self.indentwhite = self.totype(indentwhite)
if newlinestr is None:
self.newlinestr = self.totype(os.linesep)
else:
self.newlinestr = self.totype(newlinestr)
self.indentlevel = 0
self.stack = []
self.needindent = 1
self.idlefunc = idlefunc
self.idlecounter = 0
self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
self.newline()
# Figure out if writer expects bytes or unicodes
try:
# The bytes check should be first. See:
# https://github.com/fonttools/fonttools/pull/233
self.file.write(b'')
self.totype = tobytes
except TypeError:
# This better not fail.
self.file.write('')
self.totype = tostr
self.indentwhite = self.totype(indentwhite)
if newlinestr is None:
self.newlinestr = self.totype(os.linesep)
else:
self.newlinestr = self.totype(newlinestr)
self.indentlevel = 0
self.stack = []
self.needindent = 1
self.idlefunc = idlefunc
self.idlecounter = 0
self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
self.newline()
def __enter__(self):
return self
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def close(self):
    """Close the output stream, but only if this writer opened it itself."""
    if self._closeStream:
        self.file.close()
def close(self):
if self._closeStream:
self.file.close()
def write(self, string, indent=True):
"""Writes text."""
self._writeraw(escape(string), indent=indent)
def write(self, string, indent=True):
"""Writes text."""
self._writeraw(escape(string), indent=indent)
def writecdata(self, string):
"""Writes text in a CDATA section."""
self._writeraw("<![CDATA[" + string + "]]>")
def writecdata(self, string):
"""Writes text in a CDATA section."""
self._writeraw("<![CDATA[" + string + "]]>")
def write8bit(self, data, strip=False):
"""Writes a bytes() sequence into the XML, escaping
non-ASCII bytes. When this is read in xmlReader,
the original bytes can be recovered by encoding to
'latin-1'."""
self._writeraw(escape8bit(data), strip=strip)
def write8bit(self, data, strip=False):
"""Writes a bytes() sequence into the XML, escaping
non-ASCII bytes. When this is read in xmlReader,
the original bytes can be recovered by encoding to
'latin-1'."""
self._writeraw(escape8bit(data), strip=strip)
def write_noindent(self, string):
"""Writes text without indentation."""
self._writeraw(escape(string), indent=False)
def write_noindent(self, string):
"""Writes text without indentation."""
self._writeraw(escape(string), indent=False)
def _writeraw(self, data, indent=True, strip=False):
    """Writes bytes, possibly indented."""
    if indent and self.needindent:
        # at the start of a line: emit the current indentation first
        self.file.write(self.indentlevel * self.indentwhite)
        self.needindent = 0
    # convert to whatever the stream expects (bytes or str), assuming UTF-8
    s = self.totype(data, encoding="utf_8")
    if strip:
        s = s.strip()
    self.file.write(s)
def _writeraw(self, data, indent=True, strip=False):
"""Writes bytes, possibly indented."""
if indent and self.needindent:
self.file.write(self.indentlevel * self.indentwhite)
self.needindent = 0
s = self.totype(data, encoding="utf_8")
if (strip):
s = s.strip()
self.file.write(s)
def newline(self):
    """Emit the newline string and mark that the next write needs indenting.

    Every 100th call also invokes the idle callback, if one was given.
    """
    self.file.write(self.newlinestr)
    self.needindent = 1
    count = self.idlecounter
    if count % 100 == 0 and self.idlefunc is not None:
        self.idlefunc()
    self.idlecounter = count + 1
def newline(self):
self.file.write(self.newlinestr)
self.needindent = 1
idlecounter = self.idlecounter
if not idlecounter % 100 and self.idlefunc is not None:
self.idlefunc()
self.idlecounter = idlecounter + 1
def comment(self, data):
data = escape(data)
lines = data.split("\n")
self._writeraw("<!-- " + lines[0])
for line in lines[1:]:
self.newline()
self._writeraw(" " + line)
self._writeraw(" -->")
def comment(self, data):
data = escape(data)
lines = data.split("\n")
self._writeraw("<!-- " + lines[0])
for line in lines[1:]:
self.newline()
self._writeraw(" " + line)
self._writeraw(" -->")
def simpletag(self, _TAG_, *args, **kwargs):
attrdata = self.stringifyattrs(*args, **kwargs)
data = "<%s%s/>" % (_TAG_, attrdata)
self._writeraw(data)
def simpletag(self, _TAG_, *args, **kwargs):
attrdata = self.stringifyattrs(*args, **kwargs)
data = "<%s%s/>" % (_TAG_, attrdata)
self._writeraw(data)
def begintag(self, _TAG_, *args, **kwargs):
attrdata = self.stringifyattrs(*args, **kwargs)
data = "<%s%s>" % (_TAG_, attrdata)
self._writeraw(data)
self.stack.append(_TAG_)
self.indent()
def begintag(self, _TAG_, *args, **kwargs):
attrdata = self.stringifyattrs(*args, **kwargs)
data = "<%s%s>" % (_TAG_, attrdata)
self._writeraw(data)
self.stack.append(_TAG_)
self.indent()
def endtag(self, _TAG_):
assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
del self.stack[-1]
self.dedent()
data = "</%s>" % _TAG_
self._writeraw(data)
def endtag(self, _TAG_):
assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
del self.stack[-1]
self.dedent()
data = "</%s>" % _TAG_
self._writeraw(data)
def dumphex(self, data):
    """Write *data* as lines of hex digits: 16 bytes per line,
    in space-separated groups of 8 hex digits."""
    bytes_per_line = 16
    hex_per_line = bytes_per_line * 2
    group = 8
    for start in range(0, len(data), bytes_per_line):
        digits = hexStr(data[start : start + bytes_per_line])
        pieces = [
            digits[off : off + group] for off in range(0, hex_per_line, group)
        ]
        self._writeraw(" ".join(pieces))
        self.newline()
def dumphex(self, data):
linelength = 16
hexlinelength = linelength * 2
chunksize = 8
for i in range(0, len(data), linelength):
hexline = hexStr(data[i:i+linelength])
line = ""
white = ""
for j in range(0, hexlinelength, chunksize):
line = line + white + hexline[j:j+chunksize]
white = " "
self._writeraw(line)
self.newline()
def indent(self):
self.indentlevel = self.indentlevel + 1
def indent(self):
self.indentlevel = self.indentlevel + 1
def dedent(self):
assert self.indentlevel > 0
self.indentlevel = self.indentlevel - 1
def dedent(self):
assert self.indentlevel > 0
self.indentlevel = self.indentlevel - 1
def stringifyattrs(self, *args, **kwargs):
    """Serialize attributes to a ' name="value"' string.

    Accepts either keyword arguments (serialized sorted by name) or a
    single positional sequence of (attr, value) pairs (serialized in
    the given order).  Returns "" when no attributes are given.
    """
    if kwargs:
        assert not args
        items = sorted(kwargs.items())
    elif args:
        assert len(args) == 1
        items = args[0]
    else:
        return ""
    chunks = []
    for attrname, value in items:
        if not isinstance(value, (bytes, str)):
            value = str(value)
        chunks.append(' %s="%s"' % (attrname, escapeattr(value)))
    return "".join(chunks)
def stringifyattrs(self, *args, **kwargs):
if kwargs:
assert not args
attributes = sorted(kwargs.items())
elif args:
assert len(args) == 1
attributes = args[0]
else:
return ""
data = ""
for attr, value in attributes:
if not isinstance(value, (bytes, str)):
value = str(value)
data = data + ' %s="%s"' % (attr, escapeattr(value))
return data
def escape(data):
    """XML-escape text content: &, <, > and carriage returns.

    The ampersand is replaced first so already-produced entities are
    not double-escaped.
    """
    text = tostr(data, "utf_8")
    for raw, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;"), ("\r", "&#13;")):
        text = text.replace(raw, entity)
    return text
data = tostr(data, "utf_8")
data = data.replace("&", "&amp;")
data = data.replace("<", "&lt;")
data = data.replace(">", "&gt;")
data = data.replace("\r", "&#13;")
return data
def escapeattr(data):
    """Escape text for use in a double-quoted XML attribute value:
    escape() plus the double-quote character."""
    return escape(data).replace('"', "&quot;")
data = escape(data)
data = data.replace('"', "&quot;")
return data
def escape8bit(data):
    """Escape a byte string for XML output.

    The input is decoded as latin-1; printable ASCII characters other
    than <, & and > pass through, everything else becomes a numeric
    character reference, so the original bytes can be recovered by
    re-encoding to latin-1.
    """

    def _escapechar(ch):
        code = ord(ch)
        if 32 <= code <= 127 and ch not in "<&>":
            return ch
        return "&#" + repr(code) + ";"

    return strjoin(map(_escapechar, data.decode("latin-1")))
"""Input is Unicode string."""
def escapechar(c):
n = ord(c)
if 32 <= n <= 127 and c not in "<&>":
return c
else:
return "&#" + repr(n) + ";"
return strjoin(map(escapechar, data.decode("latin-1")))
def hexStr(s):
    """Return the lowercase two-digit hex representation of each byte in *s*."""
    digits = string.hexdigits  # first 16 entries are '0'..'9','a'..'f'
    pieces = []
    for ch in s:
        value = byteord(ch)
        pieces.append(digits[(value >> 4) & 0xF])
        pieces.append(digits[value & 0xF])
    return "".join(pieces)
h = string.hexdigits
r = ""
for c in s:
i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
import sys
from fontTools.mtiLib import main
if __name__ == '__main__':
sys.exit(main())
if __name__ == "__main__":
sys.exit(main())

View File

@ -2,5 +2,5 @@ import sys
from fontTools.otlLib.optimize import main
if __name__ == '__main__':
sys.exit(main())
if __name__ == "__main__":
sys.exit(main())

View File

@ -7,51 +7,46 @@ __all__ = ["AreaPen"]
class AreaPen(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
self.value = 0
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
self.value = 0
def _moveTo(self, p0):
self._p0 = self._startPoint = p0
def _moveTo(self, p0):
self._p0 = self._startPoint = p0
def _lineTo(self, p1):
x0, y0 = self._p0
x1, y1 = p1
self.value -= (x1 - x0) * (y1 + y0) * 0.5
self._p0 = p1
def _lineTo(self, p1):
x0, y0 = self._p0
x1, y1 = p1
self.value -= (x1 - x0) * (y1 + y0) * .5
self._p0 = p1
def _qCurveToOne(self, p1, p2):
# https://github.com/Pomax/bezierinfo/issues/44
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
self.value -= (x2 * y1 - x1 * y2) / 3
self._lineTo(p2)
self._p0 = p2
def _qCurveToOne(self, p1, p2):
# https://github.com/Pomax/bezierinfo/issues/44
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
self.value -= (x2 * y1 - x1 * y2) / 3
self._lineTo(p2)
self._p0 = p2
def _curveToOne(self, p1, p2, p3):
# https://github.com/Pomax/bezierinfo/issues/44
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
x3, y3 = p3[0] - x0, p3[1] - y0
self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
self._lineTo(p3)
self._p0 = p3
def _curveToOne(self, p1, p2, p3):
# https://github.com/Pomax/bezierinfo/issues/44
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
x3, y3 = p3[0] - x0, p3[1] - y0
self.value -= (
x1 * ( - y2 - y3) +
x2 * (y1 - 2*y3) +
x3 * (y1 + 2*y2 )
) * 0.15
self._lineTo(p3)
self._p0 = p3
def _closePath(self):
self._lineTo(self._startPoint)
del self._p0, self._startPoint
def _closePath(self):
self._lineTo(self._startPoint)
del self._p0, self._startPoint
def _endPath(self):
if self._p0 != self._startPoint:
# Area is not defined for open contours.
raise NotImplementedError
del self._p0, self._startPoint
def _endPath(self):
if self._p0 != self._startPoint:
# Area is not defined for open contours.
raise NotImplementedError
del self._p0, self._startPoint

View File

@ -40,372 +40,383 @@ from typing import Tuple
from fontTools.misc.loggingTools import LogMixin
__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
"decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
__all__ = [
"AbstractPen",
"NullPen",
"BasePen",
"PenError",
"decomposeSuperBezierSegment",
"decomposeQuadraticSegment",
]
class PenError(Exception):
"""Represents an error during penning."""
"""Represents an error during penning."""
class OpenContourError(PenError):
pass
pass
class AbstractPen:
def moveTo(self, pt: Tuple[float, float]) -> None:
"""Begin a new sub path, set the current point to 'pt'. You must
end each sub path with a call to pen.closePath() or pen.endPath().
"""
raise NotImplementedError
def moveTo(self, pt: Tuple[float, float]) -> None:
"""Begin a new sub path, set the current point to 'pt'. You must
end each sub path with a call to pen.closePath() or pen.endPath().
"""
raise NotImplementedError
def lineTo(self, pt: Tuple[float, float]) -> None:
"""Draw a straight line from the current point to 'pt'."""
raise NotImplementedError
def lineTo(self, pt: Tuple[float, float]) -> None:
"""Draw a straight line from the current point to 'pt'."""
raise NotImplementedError
def curveTo(self, *points: Tuple[float, float]) -> None:
"""Draw a cubic bezier with an arbitrary number of control points.
def curveTo(self, *points: Tuple[float, float]) -> None:
"""Draw a cubic bezier with an arbitrary number of control points.
The last point specified is on-curve, all others are off-curve
(control) points. If the number of control points is > 2, the
segment is split into multiple bezier segments. This works
like this:
The last point specified is on-curve, all others are off-curve
(control) points. If the number of control points is > 2, the
segment is split into multiple bezier segments. This works
like this:
Let n be the number of control points (which is the number of
arguments to this call minus 1). If n==2, a plain vanilla cubic
bezier is drawn. If n==1, we fall back to a quadratic segment and
if n==0 we draw a straight line. It gets interesting when n>2:
n-1 PostScript-style cubic segments will be drawn as if it were
one curve. See decomposeSuperBezierSegment().
Let n be the number of control points (which is the number of
arguments to this call minus 1). If n==2, a plain vanilla cubic
bezier is drawn. If n==1, we fall back to a quadratic segment and
if n==0 we draw a straight line. It gets interesting when n>2:
n-1 PostScript-style cubic segments will be drawn as if it were
one curve. See decomposeSuperBezierSegment().
The conversion algorithm used for n>2 is inspired by NURB
splines, and is conceptually equivalent to the TrueType "implied
points" principle. See also decomposeQuadraticSegment().
"""
raise NotImplementedError
The conversion algorithm used for n>2 is inspired by NURB
splines, and is conceptually equivalent to the TrueType "implied
points" principle. See also decomposeQuadraticSegment().
"""
raise NotImplementedError
def qCurveTo(self, *points: Tuple[float, float]) -> None:
"""Draw a whole string of quadratic curve segments.
def qCurveTo(self, *points: Tuple[float, float]) -> None:
"""Draw a whole string of quadratic curve segments.
The last point specified is on-curve, all others are off-curve
points.
The last point specified is on-curve, all others are off-curve
points.
This method implements TrueType-style curves, breaking up curves
using 'implied points': between each two consequtive off-curve points,
there is one implied point exactly in the middle between them. See
also decomposeQuadraticSegment().
This method implements TrueType-style curves, breaking up curves
using 'implied points': between each two consequtive off-curve points,
there is one implied point exactly in the middle between them. See
also decomposeQuadraticSegment().
The last argument (normally the on-curve point) may be None.
This is to support contours that have NO on-curve points (a rarely
seen feature of TrueType outlines).
"""
raise NotImplementedError
The last argument (normally the on-curve point) may be None.
This is to support contours that have NO on-curve points (a rarely
seen feature of TrueType outlines).
"""
raise NotImplementedError
def closePath(self) -> None:
"""Close the current sub path. You must call either pen.closePath()
or pen.endPath() after each sub path.
"""
pass
def closePath(self) -> None:
"""Close the current sub path. You must call either pen.closePath()
or pen.endPath() after each sub path.
"""
pass
def endPath(self) -> None:
"""End the current sub path, but don't close it. You must call
either pen.closePath() or pen.endPath() after each sub path.
"""
pass
def endPath(self) -> None:
"""End the current sub path, but don't close it. You must call
either pen.closePath() or pen.endPath() after each sub path.
"""
pass
def addComponent(
self,
glyphName: str,
transformation: Tuple[float, float, float, float, float, float]
) -> None:
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
containing an affine transformation, or a Transform object from the
fontTools.misc.transform module. More precisely: it should be a
sequence containing 6 numbers.
"""
raise NotImplementedError
def addComponent(
self,
glyphName: str,
transformation: Tuple[float, float, float, float, float, float],
) -> None:
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
containing an affine transformation, or a Transform object from the
fontTools.misc.transform module. More precisely: it should be a
sequence containing 6 numbers.
"""
raise NotImplementedError
class NullPen(AbstractPen):
"""A pen that does nothing.
"""
"""A pen that does nothing."""
def moveTo(self, pt):
pass
def moveTo(self, pt):
pass
def lineTo(self, pt):
pass
def lineTo(self, pt):
pass
def curveTo(self, *points):
pass
def curveTo(self, *points):
pass
def qCurveTo(self, *points):
pass
def qCurveTo(self, *points):
pass
def closePath(self):
pass
def closePath(self):
pass
def endPath(self):
pass
def endPath(self):
pass
def addComponent(self, glyphName, transformation):
pass
def addComponent(self, glyphName, transformation):
pass
class LoggingPen(LogMixin, AbstractPen):
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)
"""
pass
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""
pass
class MissingComponentError(KeyError):
"""Indicates a component pointing to a non-existent glyph in the glyphset."""
"""Indicates a component pointing to a non-existent glyph in the glyphset."""
class DecomposingPen(LoggingPen):
""" Implements a 'addComponent' method that decomposes components
(i.e. draws them onto self as simple contours).
It can also be used as a mixin class (e.g. see ContourRecordingPen).
"""Implements a 'addComponent' method that decomposes components
(i.e. draws them onto self as simple contours).
It can also be used as a mixin class (e.g. see ContourRecordingPen).
You must override moveTo, lineTo, curveTo and qCurveTo. You may
additionally override closePath, endPath and addComponent.
You must override moveTo, lineTo, curveTo and qCurveTo. You may
additionally override closePath, endPath and addComponent.
By default a warning message is logged when a base glyph is missing;
set the class variable ``skipMissingComponents`` to False if you want
to raise a :class:`MissingComponentError` exception.
"""
By default a warning message is logged when a base glyph is missing;
set the class variable ``skipMissingComponents`` to False if you want
to raise a :class:`MissingComponentError` exception.
"""
skipMissingComponents = True
skipMissingComponents = True
def __init__(self, glyphSet):
""" Takes a single 'glyphSet' argument (dict), in which the glyphs
that are referenced as components are looked up by their name.
"""
super(DecomposingPen, self).__init__()
self.glyphSet = glyphSet
def __init__(self, glyphSet):
"""Takes a single 'glyphSet' argument (dict), in which the glyphs
that are referenced as components are looked up by their name.
"""
super(DecomposingPen, self).__init__()
self.glyphSet = glyphSet
def addComponent(self, glyphName, transformation):
""" Transform the points of the base glyph and draw it onto self.
"""
from fontTools.pens.transformPen import TransformPen
try:
glyph = self.glyphSet[glyphName]
except KeyError:
if not self.skipMissingComponents:
raise MissingComponentError(glyphName)
self.log.warning(
"glyph '%s' is missing from glyphSet; skipped" % glyphName)
else:
tPen = TransformPen(self, transformation)
glyph.draw(tPen)
def addComponent(self, glyphName, transformation):
"""Transform the points of the base glyph and draw it onto self."""
from fontTools.pens.transformPen import TransformPen
try:
glyph = self.glyphSet[glyphName]
except KeyError:
if not self.skipMissingComponents:
raise MissingComponentError(glyphName)
self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
else:
tPen = TransformPen(self, transformation)
glyph.draw(tPen)
class BasePen(DecomposingPen):
"""Base class for drawing pens. You must override _moveTo, _lineTo and
_curveToOne. You may additionally override _closePath, _endPath,
addComponent and/or _qCurveToOne. You should not override any other
methods.
"""
"""Base class for drawing pens. You must override _moveTo, _lineTo and
_curveToOne. You may additionally override _closePath, _endPath,
addComponent and/or _qCurveToOne. You should not override any other
methods.
"""
def __init__(self, glyphSet=None):
super(BasePen, self).__init__(glyphSet)
self.__currentPoint = None
def __init__(self, glyphSet=None):
super(BasePen, self).__init__(glyphSet)
self.__currentPoint = None
# must override
# must override
def _moveTo(self, pt):
raise NotImplementedError
def _moveTo(self, pt):
raise NotImplementedError
def _lineTo(self, pt):
raise NotImplementedError
def _lineTo(self, pt):
raise NotImplementedError
def _curveToOne(self, pt1, pt2, pt3):
raise NotImplementedError
def _curveToOne(self, pt1, pt2, pt3):
raise NotImplementedError
# may override
# may override
def _closePath(self):
pass
def _closePath(self):
pass
def _endPath(self):
pass
def _endPath(self):
pass
def _qCurveToOne(self, pt1, pt2):
"""This method implements the basic quadratic curve type. The
default implementation delegates the work to the cubic curve
function. Optionally override with a native implementation.
"""
pt0x, pt0y = self.__currentPoint
pt1x, pt1y = pt1
pt2x, pt2y = pt2
mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)
def _qCurveToOne(self, pt1, pt2):
"""This method implements the basic quadratic curve type. The
default implementation delegates the work to the cubic curve
function. Optionally override with a native implementation.
"""
pt0x, pt0y = self.__currentPoint
pt1x, pt1y = pt1
pt2x, pt2y = pt2
mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)
# don't override
# don't override
def _getCurrentPoint(self):
"""Return the current point. This is not part of the public
interface, yet is useful for subclasses.
"""
return self.__currentPoint
def _getCurrentPoint(self):
"""Return the current point. This is not part of the public
interface, yet is useful for subclasses.
"""
return self.__currentPoint
def closePath(self):
self._closePath()
self.__currentPoint = None
def closePath(self):
self._closePath()
self.__currentPoint = None
def endPath(self):
self._endPath()
self.__currentPoint = None
def endPath(self):
self._endPath()
self.__currentPoint = None
def moveTo(self, pt):
self._moveTo(pt)
self.__currentPoint = pt
def moveTo(self, pt):
self._moveTo(pt)
self.__currentPoint = pt
def lineTo(self, pt):
self._lineTo(pt)
self.__currentPoint = pt
def lineTo(self, pt):
self._lineTo(pt)
self.__currentPoint = pt
def curveTo(self, *points):
n = len(points) - 1 # 'n' is the number of control points
assert n >= 0
if n == 2:
# The common case, we have exactly two BCP's, so this is a standard
# cubic bezier. Even though decomposeSuperBezierSegment() handles
# this case just fine, we special-case it anyway since it's so
# common.
self._curveToOne(*points)
self.__currentPoint = points[-1]
elif n > 2:
# n is the number of control points; split curve into n-1 cubic
# bezier segments. The algorithm used here is inspired by NURB
# splines and the TrueType "implied point" principle, and ensures
# the smoothest possible connection between two curve segments,
# with no disruption in the curvature. It is practical since it
# allows one to construct multiple bezier segments with a much
# smaller amount of points.
_curveToOne = self._curveToOne
for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
_curveToOne(pt1, pt2, pt3)
self.__currentPoint = pt3
elif n == 1:
self.qCurveTo(*points)
elif n == 0:
self.lineTo(points[0])
else:
raise AssertionError("can't get there from here")
def curveTo(self, *points):
n = len(points) - 1 # 'n' is the number of control points
assert n >= 0
if n == 2:
# The common case, we have exactly two BCP's, so this is a standard
# cubic bezier. Even though decomposeSuperBezierSegment() handles
# this case just fine, we special-case it anyway since it's so
# common.
self._curveToOne(*points)
self.__currentPoint = points[-1]
elif n > 2:
# n is the number of control points; split curve into n-1 cubic
# bezier segments. The algorithm used here is inspired by NURB
# splines and the TrueType "implied point" principle, and ensures
# the smoothest possible connection between two curve segments,
# with no disruption in the curvature. It is practical since it
# allows one to construct multiple bezier segments with a much
# smaller amount of points.
_curveToOne = self._curveToOne
for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
_curveToOne(pt1, pt2, pt3)
self.__currentPoint = pt3
elif n == 1:
self.qCurveTo(*points)
elif n == 0:
self.lineTo(points[0])
else:
raise AssertionError("can't get there from here")
def qCurveTo(self, *points):
    """Draw a string of TrueType-style quadratic curve segments.

    The last point is the on-curve destination; all others are off-curve
    control points.  The last point may be None, for contours that have
    no on-curve points at all.
    """
    n = len(points) - 1  # 'n' is the number of control points
    assert n >= 0
    if points[-1] is None:
        # Special case for TrueType quadratics: it is possible to
        # define a contour with NO on-curve points. BasePen supports
        # this by allowing the final argument (the expected on-curve
        # point) to be None. We simulate the feature by making the implied
        # on-curve point between the last and the first off-curve points
        # explicit.
        x, y = points[-2]  # last off-curve point
        nx, ny = points[0]  # first off-curve point
        impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
        self.__currentPoint = impliedStartPoint
        self._moveTo(impliedStartPoint)
        points = points[:-1] + (impliedStartPoint,)
    if n > 0:
        # Split the string of points into discrete quadratic curve
        # segments. Between any two consecutive off-curve points
        # there's an implied on-curve point exactly in the middle.
        # This is where the segment splits.
        _qCurveToOne = self._qCurveToOne
        for pt1, pt2 in decomposeQuadraticSegment(points):
            _qCurveToOne(pt1, pt2)
            self.__currentPoint = pt2
    else:
        # a single point degenerates to a straight line
        self.lineTo(points[0])
def qCurveTo(self, *points):
n = len(points) - 1 # 'n' is the number of control points
assert n >= 0
if points[-1] is None:
# Special case for TrueType quadratics: it is possible to
# define a contour with NO on-curve points. BasePen supports
# this by allowing the final argument (the expected on-curve
# point) to be None. We simulate the feature by making the implied
# on-curve point between the last and the first off-curve points
# explicit.
x, y = points[-2] # last off-curve point
nx, ny = points[0] # first off-curve point
impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
self.__currentPoint = impliedStartPoint
self._moveTo(impliedStartPoint)
points = points[:-1] + (impliedStartPoint,)
if n > 0:
# Split the string of points into discrete quadratic curve
# segments. Between any two consecutive off-curve points
# there's an implied on-curve point exactly in the middle.
# This is where the segment splits.
_qCurveToOne = self._qCurveToOne
for pt1, pt2 in decomposeQuadraticSegment(points):
_qCurveToOne(pt1, pt2)
self.__currentPoint = pt2
else:
self.lineTo(points[0])
def decomposeSuperBezierSegment(points):
    """Split the SuperBezier described by 'points' into a list of regular
    bezier segments. The 'points' argument must be a sequence with length
    3 or greater, containing (x, y) coordinates. The last point is the
    destination on-curve point, the rest of the points are off-curve points.
    The start point should not be supplied.

    This function returns a list of (pt1, pt2, pt3) tuples, which each
    specify a regular curveto-style bezier segment.
    """
    n = len(points) - 1
    assert n > 1
    bezierSegments = []
    pt1, pt2, pt3 = points[0], None, None
    for i in range(2, n + 1):
        # calculate points in between control points.
        nDivisions = min(i, 3, n - i + 2)
        for j in range(1, nDivisions):
            factor = j / nDivisions
            temp1 = points[i - 1]
            temp2 = points[i - 2]
            # interpolate between the two neighboring control points
            temp = (
                temp2[0] + factor * (temp1[0] - temp2[0]),
                temp2[1] + factor * (temp1[1] - temp2[1]),
            )
            if pt2 is None:
                pt2 = temp
            else:
                # implied on-curve point halfway between the two computed
                # control points closes the current cubic segment
                pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1]))
                bezierSegments.append((pt1, pt2, pt3))
                pt1, pt2, pt3 = temp, None, None
    bezierSegments.append((pt1, points[-2], points[-1]))
    return bezierSegments
def decomposeQuadraticSegment(points):
    """Split the quadratic curve segment described by 'points' into a list
    of "atomic" quadratic segments. The 'points' argument must be a sequence
    with length 2 or greater, containing (x, y) coordinates. The last point
    is the destination on-curve point, the rest of the points are off-curve
    points. The start point should not be supplied.

    This function returns a list of (pt1, pt2) tuples, which each specify a
    plain quadratic bezier segment.
    """
    n = len(points) - 1
    assert n > 0
    quadSegments = []
    for i in range(n - 1):
        x, y = points[i]
        nx, ny = points[i + 1]
        # implied on-curve point exactly halfway between two off-curves
        impliedPt = (0.5 * (x + nx), 0.5 * (y + ny))
        quadSegments.append((points[i], impliedPt))
    quadSegments.append((points[-2], points[-1]))
    return quadSegments
class _TestPen(BasePen):
    """Test class that prints PostScript to stdout."""

    def _moveTo(self, pt):
        print("%s %s moveto" % (pt[0], pt[1]))

    def _lineTo(self, pt):
        print("%s %s lineto" % (pt[0], pt[1]))

    def _curveToOne(self, bcp1, bcp2, pt):
        print(
            "%s %s %s %s %s %s curveto"
            % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
        )

    def _closePath(self):
        print("closepath")
if __name__ == "__main__":
    # Small smoke-test demo: emit PostScript for a simple contour ...
    pen = _TestPen(None)
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
    pen.closePath()

    # ... and for the TrueType "no on-curve point" scenario.
    pen = _TestPen(None)
    pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
    pen.closePath()

View File

@ -8,91 +8,93 @@ __all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
    """Pen to calculate the "control bounds" of a shape. This is the
    bounding box of all control points, so may be larger than the
    actual bounding box if there are curves that don't have points
    on their extremes.

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax).

    If ``ignoreSinglePoints`` is True, single points are ignored.
    """

    def __init__(self, glyphSet, ignoreSinglePoints=False):
        BasePen.__init__(self, glyphSet)
        self.ignoreSinglePoints = ignoreSinglePoints
        self.init()

    def init(self):
        # Reset state so the pen instance can be reused.
        self.bounds = None
        self._start = None

    def _moveTo(self, pt):
        self._start = pt
        if not self.ignoreSinglePoints:
            self._addMoveTo()

    def _addMoveTo(self):
        # Fold the pending contour start point into the bounds.  Deferred
        # so that single-point contours can be skipped when requested.
        if self._start is None:
            return
        bounds = self.bounds
        if bounds:
            self.bounds = updateBounds(bounds, self._start)
        else:
            x, y = self._start
            self.bounds = (x, y, x, y)
        self._start = None

    def _lineTo(self, pt):
        self._addMoveTo()
        self.bounds = updateBounds(self.bounds, pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, bcp1)
        bounds = updateBounds(bounds, bcp2)
        bounds = updateBounds(bounds, pt)
        self.bounds = bounds

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, bcp)
        bounds = updateBounds(bounds, pt)
        self.bounds = bounds
class BoundsPen(ControlBoundsPen):
    """Pen to calculate the bounds of a shape. It calculates the
    correct bounds even when the shape contains curves that don't
    have points on their extremes. This is somewhat slower to compute
    than the "control bounds".

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax)
    """

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, pt)
        # Only pay for the exact cubic extrema when a control point
        # falls outside the current box.
        if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
            bounds = unionRect(
                bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
            )
        self.bounds = bounds

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, pt)
        # Same lazy strategy for the quadratic case.
        if not pointInRect(bcp, bounds):
            bounds = unionRect(
                bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
            )
        self.bounds = bounds

View File

@ -5,22 +5,22 @@ __all__ = ["CocoaPen"]
class CocoaPen(BasePen):
    """Pen that draws into an AppKit ``NSBezierPath`` (macOS only).

    If no ``path`` is supplied, a fresh ``NSBezierPath`` is created; the
    result is available as the ``path`` attribute.
    """

    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            # Imported lazily so the module stays importable off-macOS.
            from AppKit import NSBezierPath

            path = NSBezierPath.bezierPath()
        self.path = path

    def _moveTo(self, p):
        self.path.moveToPoint_(p)

    def _lineTo(self, p):
        self.path.lineToPoint_(p)

    def _curveToOne(self, p1, p2, p3):
        self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)

    def _closePath(self):
        self.path.closePath()

View File

@ -20,7 +20,7 @@ from fontTools.pens.pointPen import ReverseContourPointPen
class Cu2QuPen(AbstractPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines
"""A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools SegmentPen protocol.
Args:
@ -40,8 +40,14 @@ class Cu2QuPen(AbstractPen):
but are handled separately as anchors.
"""
def __init__(self, other_pen, max_err, reverse_direction=False,
stats=None, ignore_single_points=False):
def __init__(
self,
other_pen,
max_err,
reverse_direction=False,
stats=None,
ignore_single_points=False,
):
if reverse_direction:
self.pen = ReverseContourPen(other_pen)
else:
@ -50,9 +56,13 @@ class Cu2QuPen(AbstractPen):
self.stats = stats
if ignore_single_points:
import warnings
warnings.warn("ignore_single_points is deprecated and "
"will be removed in future versions",
UserWarning, stacklevel=2)
warnings.warn(
"ignore_single_points is deprecated and "
"will be removed in future versions",
UserWarning,
stacklevel=2,
)
self.ignore_single_points = ignore_single_points
self.start_pt = None
self.current_pt = None
@ -137,7 +147,7 @@ class Cu2QuPen(AbstractPen):
class Cu2QuPointPen(BasePointToSegmentPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines
"""A filter pen to convert cubic bezier curves to quadratic b-splines
using the RoboFab PointPen protocol.
Args:
@ -149,8 +159,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
stats: a dictionary counting the point numbers of quadratic segments.
"""
def __init__(self, other_point_pen, max_err, reverse_direction=False,
stats=None):
def __init__(self, other_point_pen, max_err, reverse_direction=False, stats=None):
BasePointToSegmentPen.__init__(self)
if reverse_direction:
self.pen = ReverseContourPointPen(other_point_pen)
@ -166,7 +175,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
prev_points = segments[-1][1]
prev_on_curve = prev_points[-1][0]
for segment_type, points in segments:
if segment_type == 'curve':
if segment_type == "curve":
for sub_points in self._split_super_bezier_segments(points):
on_curve, smooth, name, kwargs = sub_points[-1]
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
@ -200,8 +209,9 @@ class Cu2QuPointPen(BasePointToSegmentPen):
# a "super" bezier; decompose it
on_curve, smooth, name, kwargs = points[-1]
num_sub_segments = n - 1
for i, sub_points in enumerate(decomposeSuperBezierSegment([
pt for pt, _, _, _ in points])):
for i, sub_points in enumerate(
decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
):
new_segment = []
for point in sub_points[:-1]:
new_segment.append((point, False, None, {}))
@ -213,8 +223,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
new_segment.append((sub_points[-1], True, None, {}))
sub_segments.append(new_segment)
else:
raise AssertionError(
"expected 2 control points, found: %d" % n)
raise AssertionError("expected 2 control points, found: %d" % n)
return sub_segments
def _drawPoints(self, segments):
@ -223,13 +232,15 @@ class Cu2QuPointPen(BasePointToSegmentPen):
last_offcurves = []
for i, (segment_type, points) in enumerate(segments):
if segment_type in ("move", "line"):
assert len(points) == 1, (
"illegal line segment point count: %d" % len(points))
assert len(points) == 1, "illegal line segment point count: %d" % len(
points
)
pt, smooth, name, kwargs = points[0]
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
elif segment_type == "qcurve":
assert len(points) >= 2, (
"illegal qcurve segment point count: %d" % len(points))
assert len(points) >= 2, "illegal qcurve segment point count: %d" % len(
points
)
offcurves = points[:-1]
if offcurves:
if i == 0:
@ -249,8 +260,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
else:
# 'curve' segments must have been converted to 'qcurve' by now
raise AssertionError(
"unexpected segment type: %r" % segment_type)
raise AssertionError("unexpected segment type: %r" % segment_type)
for (pt, smooth, name, kwargs) in last_offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs)
pen.endPath()
@ -260,7 +270,6 @@ class Cu2QuPointPen(BasePointToSegmentPen):
self.pen.addComponent(baseGlyphName, transformation)
class Cu2QuMultiPen:
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
in a interpolation-compatible manner, using the FontTools SegmentPen protocol.
@ -281,7 +290,10 @@ class Cu2QuMultiPen:
def __init__(self, other_pens, max_err, reverse_direction=False):
if reverse_direction:
other_pens = [ReverseContourPen(pen, outputImpliedClosingLine=True) for pen in other_pens]
other_pens = [
ReverseContourPen(pen, outputImpliedClosingLine=True)
for pen in other_pens
]
self.pens = other_pens
self.max_err = max_err
self.start_pts = None
@ -297,7 +309,7 @@ class Cu2QuMultiPen:
def _add_moveTo(self):
if self.start_pts is not None:
for pt,pen in zip(self.start_pts, self.pens):
for pt, pen in zip(self.start_pts, self.pens):
pen.moveTo(*pt)
self.start_pts = None
@ -309,7 +321,7 @@ class Cu2QuMultiPen:
def lineTo(self, pts):
self._check_contour_is_open()
self._add_moveTo()
for pt,pen in zip(pts, self.pens):
for pt, pen in zip(pts, self.pens):
pen.lineTo(*pt)
self.current_pts = pts
@ -320,14 +332,14 @@ class Cu2QuMultiPen:
return
self._add_moveTo()
current_pts = []
for points,pen in zip(pointsList, self.pens):
for points, pen in zip(pointsList, self.pens):
pen.qCurveTo(*points)
current_pts.append((points[-1],))
self.current_pts = current_pts
def _curves_to_quadratic(self, pointsList):
curves = []
for current_pt,points in zip(self.current_pts, pointsList):
for current_pt, points in zip(self.current_pts, pointsList):
curves.append(current_pt + points)
quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves))
pointsList = []
@ -355,5 +367,5 @@ class Cu2QuMultiPen:
def addComponent(self, glyphName, transformations):
self._check_contour_is_closed()
for trans,pen in zip(transformations, self.pens):
for trans, pen in zip(transformations, self.pens):
pen.addComponent(glyphName, trans)

View File

@ -4,14 +4,13 @@ from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object):
    # Forwards component references unchanged to the wrapped output pen;
    # assumes the host class provides ``self._outPen`` — TODO confirm.
    def addComponent(self, glyphName, transformation, **kwargs):
        self._outPen.addComponent(glyphName, transformation, **kwargs)
class FilterPen(_PassThruComponentsMixin, AbstractPen):
""" Base class for pens that apply some transformation to the coordinates
"""Base class for pens that apply some transformation to the coordinates
they receive and pass them to another pen.
You can override any of its methods. The default implementation does
@ -121,7 +120,7 @@ class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
""" Baseclass for point pens that apply some transformation to the
"""Baseclass for point pens that apply some transformation to the
coordinates they receive and pass them to another point pen.
You can override any of its methods. The default implementation does

View File

@ -65,9 +65,7 @@ class HashPointPen(AbstractPointPen):
pt_type = segmentType[0]
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
def addComponent(
self, baseGlyphName, transformation, identifier=None, **kwargs
):
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
tr = "".join([f"{t:+}" for t in transformation])
self.data.append("[")
try:

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,12 @@
"""Calculate the perimeter of a glyph."""
from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC
from fontTools.misc.bezierTools import (
approximateQuadraticArcLengthC,
calcQuadraticArcLengthC,
approximateCubicArcLengthC,
calcCubicArcLengthC,
)
import math
@ -10,49 +15,55 @@ __all__ = ["PerimeterPen"]
def _distance(p0, p1):
return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
class PerimeterPen(BasePen):
    """Pen to calculate the perimeter (total outline length) of a glyph.

    The accumulated length is available as the ``value`` attribute after
    the glyph has been drawn.
    """

    def __init__(self, glyphset=None, tolerance=0.005):
        BasePen.__init__(self, glyphset)
        self.value = 0
        self.tolerance = tolerance

        # Choose which algorithm to use for quadratic and for cubic.
        # Quadrature is faster but has fixed error characteristic with no strong
        # error bound. The cutoff points are derived empirically.
        self._addCubic = (
            self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
        )
        self._addQuadratic = (
            self._addQuadraticQuadrature
            if tolerance >= 0.00075
            else self._addQuadraticExact
        )

    def _moveTo(self, p0):
        self.__startPoint = p0

    def _closePath(self):
        # An implicit closing line also contributes to the perimeter.
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            self._lineTo(self.__startPoint)

    def _lineTo(self, p1):
        p0 = self._getCurrentPoint()
        self.value += _distance(p0, p1)

    def _addQuadraticExact(self, c0, c1, c2):
        self.value += calcQuadraticArcLengthC(c0, c1, c2)

    def _addQuadraticQuadrature(self, c0, c1, c2):
        self.value += approximateQuadraticArcLengthC(c0, c1, c2)

    def _qCurveToOne(self, p1, p2):
        # Arc-length helpers take complex numbers, not (x, y) tuples.
        p0 = self._getCurrentPoint()
        self._addQuadratic(complex(*p0), complex(*p1), complex(*p2))

    def _addCubicRecursive(self, c0, c1, c2, c3):
        self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance)

    def _addCubicQuadrature(self, c0, c1, c2, c3):
        self.value += approximateCubicArcLengthC(c0, c1, c2, c3)

    def _curveToOne(self, p1, p2, p3):
        p0 = self._getCurrentPoint()
        self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3))

View File

@ -11,180 +11,182 @@ __all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
    """This pen implements "point inside" testing: to test whether
    a given point lies inside the shape (black) or outside (white).
    Instances of this class can be recycled, as long as the
    setTestPoint() method is used to set the new point to test.

    Typical usage:

        pen = PointInsidePen(glyphSet, (100, 200))
        outline.draw(pen)
        isInside = pen.getResult()

    Both the even-odd algorithm and the non-zero-winding-rule
    algorithm are implemented. The latter is the default, specify
    True for the evenOdd argument of __init__ or setTestPoint
    to use the even-odd algorithm.
    """

    # This class implements the classical "shoot a ray from the test point
    # to infinity and count how many times it intersects the outline" (as well
    # as the non-zero variant, where the counter is incremented if the outline
    # intersects the ray in one direction and decremented if it intersects in
    # the other direction).
    # I found an amazingly clear explanation of the subtleties involved in
    # implementing this correctly for polygons here:
    # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
    # I extended the principles outlined on that page to curves.

    def __init__(self, glyphSet, testPoint, evenOdd=False):
        BasePen.__init__(self, glyphSet)
        self.setTestPoint(testPoint, evenOdd)

    def setTestPoint(self, testPoint, evenOdd=False):
        """Set the point to test. Call this _before_ the outline gets drawn."""
        self.testPoint = testPoint
        self.evenOdd = evenOdd
        self.firstPoint = None
        self.intersectionCount = 0

    def getWinding(self):
        if self.firstPoint is not None:
            # always make sure the sub paths are closed; the algorithm only works
            # for closed paths.
            self.closePath()
        return self.intersectionCount

    def getResult(self):
        """After the shape has been drawn, getResult() returns True if the test
        point lies within the (black) shape, and False if it doesn't.
        """
        winding = self.getWinding()
        if self.evenOdd:
            result = winding % 2
        else:  # non-zero
            result = self.intersectionCount != 0
        return not not result

    def _addIntersection(self, goingUp):
        if self.evenOdd or goingUp:
            self.intersectionCount += 1
        else:
            self.intersectionCount -= 1

    def _moveTo(self, point):
        if self.firstPoint is not None:
            # always make sure the sub paths are closed; the algorithm only works
            # for closed paths.
            self.closePath()
        self.firstPoint = point

    def _lineTo(self, point):
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = point

        # Cheap rejection: segment entirely left of, below, or above the ray.
        if x1 < x and x2 < x:
            return
        if y1 < y and y2 < y:
            return
        if y1 >= y and y2 >= y:
            return

        dx = x2 - x1
        dy = y2 - y1
        t = (y - y1) / dy
        ix = dx * t + x1
        if ix < x:
            return
        self._addIntersection(y2 > y1)

    def _curveToOne(self, bcp1, bcp2, point):
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = bcp1
        x3, y3 = bcp2
        x4, y4 = point

        # Cheap rejection using the control polygon's bounding box.
        if x1 < x and x2 < x and x3 < x and x4 < x:
            return
        if y1 < y and y2 < y and y3 < y and y4 < y:
            return
        if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
            return

        dy = y1
        cy = (y2 - dy) * 3.0
        by = (y3 - y2) * 3.0 - cy
        ay = y4 - dy - cy - by
        solutions = sorted(solveCubic(ay, by, cy, dy - y))
        solutions = [t for t in solutions if -0.0 <= t <= 1.0]
        if not solutions:
            return

        dx = x1
        cx = (x2 - dx) * 3.0
        bx = (x3 - x2) * 3.0 - cx
        ax = x4 - dx - cx - bx

        above = y1 >= y
        lastT = None
        for t in solutions:
            if t == lastT:
                continue
            lastT = t
            t2 = t * t
            t3 = t2 * t

            # Sign of dy/dt tells us the crossing direction; fall back to
            # higher derivatives at stationary points.
            direction = 3 * ay * t2 + 2 * by * t + cy
            incomingGoingUp = outgoingGoingUp = direction > 0.0
            if direction == 0.0:
                direction = 6 * ay * t + 2 * by
                outgoingGoingUp = direction > 0.0
                incomingGoingUp = not outgoingGoingUp
                if direction == 0.0:
                    direction = ay
                    incomingGoingUp = outgoingGoingUp = direction > 0.0

            xt = ax * t3 + bx * t2 + cx * t + dx
            if xt < x:
                continue

            if t in (0.0, -0.0):
                if not outgoingGoingUp:
                    self._addIntersection(outgoingGoingUp)
            elif t == 1.0:
                if incomingGoingUp:
                    self._addIntersection(incomingGoingUp)
            else:
                if incomingGoingUp == outgoingGoingUp:
                    self._addIntersection(outgoingGoingUp)
                # else:
                #   we're not really intersecting, merely touching

    def _qCurveToOne_unfinished(self, bcp, point):
        # XXX need to finish this, for now doing it through a cubic
        # (BasePen implements _qCurveTo in terms of a cubic) will
        # have to do.
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = bcp
        x3, y3 = point

        c = y1
        b = (y2 - c) * 2.0
        a = y3 - c - b
        solutions = sorted(solveQuadratic(a, b, c - y))
        solutions = [
            t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
        ]
        if not solutions:
            return
        # XXX

    def _closePath(self):
        if self._getCurrentPoint() != self.firstPoint:
            self.lineTo(self.firstPoint)
        self.firstPoint = None

    def _endPath(self):
        """Insideness is not defined for open contours."""
        raise NotImplementedError

View File

@ -18,476 +18,482 @@ from typing import Any, Optional, Tuple
from fontTools.pens.basePen import AbstractPen, PenError
# Public API of the point-pen module.
__all__ = [
    "AbstractPointPen",
    "BasePointToSegmentPen",
    "PointToSegmentPen",
    "SegmentToPointPen",
    "GuessSmoothPointPen",
    "ReverseContourPointPen",
]
class AbstractPointPen:
    """Baseclass for all PointPens."""

    def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
        """Start a new sub path."""
        raise NotImplementedError

    def endPath(self) -> None:
        """End the current sub path."""
        raise NotImplementedError

    def addPoint(
        self,
        pt: Tuple[float, float],
        segmentType: Optional[str] = None,
        smooth: bool = False,
        name: Optional[str] = None,
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Add a point to the current sub path."""
        raise NotImplementedError

    def addComponent(
        self,
        baseGlyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Add a sub glyph."""
        raise NotImplementedError
class BasePointToSegmentPen(AbstractPointPen):
    """
    Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    as points, do use this base implementation as it properly takes
    care of all the edge cases.
    """

    def __init__(self):
        # Accumulates (pt, segmentType, smooth, name, kwargs) tuples for
        # the contour currently being built; None when no path is open.
        self.currentPath = None

    def beginPath(self, identifier=None, **kwargs):
        """Start a new sub path; raises PenError if one is already open."""
        if self.currentPath is not None:
            raise PenError("Path already begun.")
        self.currentPath = []

    def _flushContour(self, segments):
        """Override this method.

        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.

        The segments list contains tuples of length 2:
        (segmentType, points)

        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.

        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
        (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.

        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.

        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self):
        """End the current sub path and flush it as segments."""
        if self.currentPath is None:
            raise PenError("Path not begun.")
        points = self.currentPath
        self.currentPath = None
        if not points:
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                points = points[firstOnCurve + 1 :] + points[: firstOnCurve + 1]

        # Group consecutive points into segments, each ending at an
        # on-curve point (segmentType is None for off-curve points).
        currentSegment = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []

        self._flushContour(segments)

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        """Buffer a point for the currently open sub path."""
        if self.currentPath is None:
            raise PenError("Path not begun")
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PointToSegmentPen(BasePointToSegmentPen):
    """
    Adapter class that converts the PointPen protocol to the
    (Segment)Pen protocol.

    NOTE: The segment pen does not support and will drop point names, identifiers
    and kwargs.
    """

    def __init__(self, segmentPen, outputImpliedClosingLine=False):
        BasePointToSegmentPen.__init__(self)
        self.pen = segmentPen
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _flushContour(self, segments):
        """Translate buffered segments into (Segment)Pen calls."""
        if not segments:
            raise PenError("Must have at least one segment.")
        pen = self.pen
        if segments[0][0] == "move":
            # It's an open path.
            closed = False
            points = segments[0][1]
            if len(points) != 1:
                raise PenError(f"Illegal move segment point count: {len(points)}")
            movePt, _, _, _ = points[0]
            del segments[0]
        else:
            # It's a closed path, do a moveTo to the last
            # point of the last segment.
            closed = True
            segmentType, points = segments[-1]
            movePt, _, _, _ = points[-1]
        if movePt is None:
            # quad special case: a contour with no on-curve points contains
            # one "qcurve" segment that ends with a point that's None. We
            # must not output a moveTo() in that case.
            pass
        else:
            pen.moveTo(movePt)
        outputImpliedClosingLine = self.outputImpliedClosingLine
        nSegments = len(segments)
        lastPt = movePt
        for i in range(nSegments):
            segmentType, points = segments[i]
            points = [pt for pt, _, _, _ in points]
            if segmentType == "line":
                if len(points) != 1:
                    raise PenError(f"Illegal line segment point count: {len(points)}")
                pt = points[0]
                # For closed contours, a 'lineTo' is always implied from the last oncurve
                # point to the starting point, thus we can omit it when the last and
                # starting point don't overlap.
                # However, when the last oncurve point is a "line" segment and has same
                # coordinates as the starting point of a closed contour, we need to output
                # the closing 'lineTo' explicitly (regardless of the value of the
                # 'outputImpliedClosingLine' option) in order to disambiguate this case from
                # the implied closing 'lineTo', otherwise the duplicate point would be lost.
                # See https://github.com/googlefonts/fontmake/issues/572.
                if (
                    i + 1 != nSegments
                    or outputImpliedClosingLine
                    or not closed
                    or pt == lastPt
                ):
                    pen.lineTo(pt)
                    lastPt = pt
            elif segmentType == "curve":
                pen.curveTo(*points)
                lastPt = points[-1]
            elif segmentType == "qcurve":
                pen.qCurveTo(*points)
                lastPt = points[-1]
            else:
                raise PenError(f"Illegal segmentType: {segmentType}")
        if closed:
            pen.closePath()
        else:
            pen.endPath()

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        """Forward a component; identifiers/kwargs are dropped by design."""
        del identifier  # unused
        del kwargs  # unused
        self.pen.addComponent(glyphName, transform)
class SegmentToPointPen(AbstractPen):
    """
    Adapter class that converts the (Segment)Pen protocol to the
    PointPen protocol.
    """

    def __init__(self, pointPen, guessSmooth=True):
        # Optionally wrap the target point pen so "smooth" flags are
        # inferred from the geometry.
        if guessSmooth:
            self.pen = GuessSmoothPointPen(pointPen)
        else:
            self.pen = pointPen
        # Buffered (pt, segmentType) pairs; None when no contour is open.
        self.contour = None

    def _flushContour(self):
        # Emit the buffered contour as PointPen calls.
        pen = self.pen
        pen.beginPath()
        for pt, segmentType in self.contour:
            pen.addPoint(pt, segmentType=segmentType)
        pen.endPath()

    def moveTo(self, pt):
        self.contour = []
        self.contour.append((pt, "move"))

    def lineTo(self, pt):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self.contour.append((pt, "line"))

    def curveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        # All but the last point are off-curve (segmentType None).
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        self.contour.append((pts[-1], "curve"))

    def qCurveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if pts[-1] is None:
            # Special case: TrueType contour with no on-curve points.
            self.contour = []
        else:
            if self.contour is None:
                raise PenError("Contour missing required initial moveTo")
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        if pts[-1] is not None:
            self.contour.append((pts[-1], "qcurve"))

    def closePath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
            self.contour[0] = self.contour[-1]
            del self.contour[-1]
        else:
            # There's an implied line at the end, replace "move" with "line"
            # for the first point
            pt, tp = self.contour[0]
            if tp == "move":
                self.contour[0] = pt, "line"
        self._flushContour()
        self.contour = None

    def endPath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self._flushContour()
        self.contour = None

    def addComponent(self, glyphName, transform):
        if self.contour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform)
class GuessSmoothPointPen(AbstractPointPen):
    """
    Filtering PointPen that tries to determine whether an on-curve point
    should be "smooth", ie. that it's a "tangent" point or a "curve" point.
    """

    def __init__(self, outPen, error=0.05):
        self._outPen = outPen
        # Maximum angle difference (radians) between incoming and outgoing
        # tangents for a point to be considered smooth.
        self._error = error
        self._points = None

    def _flushContour(self):
        # Mark buffered on-curve points as smooth when their neighbors
        # are (nearly) collinear, then forward everything to the out pen.
        if self._points is None:
            raise PenError("Path not begun")
        points = self._points
        nPoints = len(points)
        if not nPoints:
            return
        if points[0][1] == "move":
            # Open path.
            indices = range(1, nPoints - 1)
        elif nPoints > 1:
            # Closed path. To avoid having to mod the contour index, we
            # simply abuse Python's negative index feature, and start at -1
            indices = range(-1, nPoints - 1)
        else:
            # closed path containing 1 point (!), ignore.
            indices = []
        for i in indices:
            pt, segmentType, _, name, kwargs = points[i]
            if segmentType is None:
                continue
            prev = i - 1
            next = i + 1
            if points[prev][1] is not None and points[next][1] is not None:
                continue
            # At least one of our neighbors is an off-curve point
            pt = points[i][0]
            prevPt = points[prev][0]
            nextPt = points[next][0]
            if pt != prevPt and pt != nextPt:
                dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
                dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
                a1 = math.atan2(dy1, dx1)
                a2 = math.atan2(dy2, dx2)
                if abs(a1 - a2) < self._error:
                    points[i] = pt, segmentType, True, name, kwargs

        for pt, segmentType, smooth, name, kwargs in points:
            self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

    def beginPath(self, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Path already begun")
        self._points = []
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._flushContour()
        self._outPen.endPath()
        self._points = None

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        # Incoming smooth flags are deliberately discarded (stored as
        # False); smoothness is re-guessed in _flushContour.
        if self._points is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._points.append((pt, segmentType, False, name, kwargs))

    def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Components must be added before or after contours")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.addComponent(glyphName, transformation, **kwargs)
class ReverseContourPointPen(AbstractPointPen):
    """
    This is a PointPen that passes outline data to another PointPen, but
    reversing the winding direction of all contours. Components are simply
    passed through unchanged.

    Closed contours are reversed in such a way that the first point remains
    the first point.
    """

    def __init__(self, outputPointPen):
        self.pen = outputPointPen
        # a place to store the points for the current sub path
        self.currentContour = None

    def _flushContour(self):
        # Reverse the buffered contour and replay it on the output pen.
        pen = self.pen
        contour = self.currentContour
        if not contour:
            pen.beginPath(identifier=self.currentContourIdentifier)
            pen.endPath()
            return

        closed = contour[0][1] != "move"
        if not closed:
            lastSegmentType = "move"
        else:
            # Remove the first point and insert it at the end. When
            # the list of points gets reversed, this point will then
            # again be at the start. In other words, the following
            # will hold:
            #   for N in range(len(originalContour)):
            #       originalContour[N] == reversedContour[-N]
            contour.append(contour.pop(0))
            # Find the first on-curve point.
            firstOnCurve = None
            for i in range(len(contour)):
                if contour[i][1] is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # There are no on-curve points, we basically have to
                # do nothing but contour.reverse().
                lastSegmentType = None
            else:
                lastSegmentType = contour[firstOnCurve][1]

        contour.reverse()
        if not closed:
            # Open paths must start with a move, so we simply dump
            # all off-curve points leading up to the first on-curve.
            while contour[0][1] is None:
                contour.pop(0)
        pen.beginPath(identifier=self.currentContourIdentifier)
        for pt, nextSegmentType, smooth, name, kwargs in contour:
            if nextSegmentType is not None:
                # After reversal, each on-curve point takes the segment
                # type of the *following* on-curve point in the original.
                segmentType = lastSegmentType
                lastSegmentType = nextSegmentType
            else:
                segmentType = None
            pen.addPoint(
                pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
            )
        pen.endPath()

    def beginPath(self, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Path already begun")
        self.currentContour = []
        self.currentContourIdentifier = identifier
        self.onCurve = []

    def endPath(self):
        if self.currentContour is None:
            raise PenError("Path not begun")
        self._flushContour()
        self.currentContour = None

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        if self.currentContour is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self.currentContour.append((pt, segmentType, smooth, name, kwargs))

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)

View File

@ -5,25 +5,25 @@ __all__ = ["QtPen"]
class QtPen(BasePen):
    """Pen that draws a glyph into a PyQt5 ``QPainterPath``.

    If no ``path`` is given, a fresh ``QPainterPath`` is created (PyQt5 is
    imported lazily so the module can be imported without Qt installed).
    """

    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            from PyQt5.QtGui import QPainterPath

            path = QPainterPath()
        self.path = path

    def _moveTo(self, p):
        self.path.moveTo(*p)

    def _lineTo(self, p):
        self.path.lineTo(*p)

    def _curveToOne(self, p1, p2, p3):
        self.path.cubicTo(*p1, *p2, *p3)

    def _qCurveToOne(self, p1, p2):
        self.path.quadTo(*p1, *p2)

    def _closePath(self):
        self.path.closeSubpath()

View File

@ -3,43 +3,42 @@ from fontTools.pens.basePen import BasePen
from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint
from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint
from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath
__all__ = ["QuartzPen"]
class QuartzPen(BasePen):
    """A pen that creates a CGPath

    Parameters
    - path: an optional CGPath to add to
    - xform: an optional CGAffineTransform to apply to the path
    """

    def __init__(self, glyphSet, path=None, xform=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            path = CGPathCreateMutable()
        self.path = path
        self.xform = xform

    def _moveTo(self, pt):
        x, y = pt
        CGPathMoveToPoint(self.path, self.xform, x, y)

    def _lineTo(self, pt):
        x, y = pt
        CGPathAddLineToPoint(self.path, self.xform, x, y)

    def _curveToOne(self, p1, p2, p3):
        (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
        CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)

    def _qCurveToOne(self, p1, p2):
        (x1, y1), (x2, y2) = p1, p2
        CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)

    def _closePath(self):
        CGPathCloseSubpath(self.path)

View File

@ -4,152 +4,164 @@ from fontTools.pens.pointPen import AbstractPointPen
# Public API of this module (deduplicated: the diff paste listed every
# name twice).
__all__ = [
    "replayRecording",
    "RecordingPen",
    "DecomposingRecordingPen",
    "RecordingPointPen",
]
def replayRecording(recording, pen):
    """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
    to a pen.

    Note that recording does not have to be produced by those pens.
    It can be any iterable of tuples of method name and tuple-of-arguments.
    Likewise, pen can be any objects receiving those method calls.
    """
    for operator, operands in recording:
        getattr(pen, operator)(*operands)
class RecordingPen(AbstractPen):
    """Pen recording operations that can be accessed or replayed.

    The recording can be accessed as pen.value; or replayed using
    pen.replay(otherPen).

    :Example:

        from fontTools.ttLib import TTFont
        from fontTools.pens.recordingPen import RecordingPen

        glyph_name = 'dollar'
        font_path = 'MyFont.otf'

        font = TTFont(font_path)
        glyphset = font.getGlyphSet()
        glyph = glyphset[glyph_name]

        pen = RecordingPen()
        glyph.draw(pen)
        print(pen.value)
    """

    def __init__(self):
        # List of (methodName, argsTuple) in call order.
        self.value = []

    def moveTo(self, p0):
        self.value.append(("moveTo", (p0,)))

    def lineTo(self, p1):
        self.value.append(("lineTo", (p1,)))

    def qCurveTo(self, *points):
        self.value.append(("qCurveTo", points))

    def curveTo(self, *points):
        self.value.append(("curveTo", points))

    def closePath(self):
        self.value.append(("closePath", ()))

    def endPath(self):
        self.value.append(("endPath", ()))

    def addComponent(self, glyphName, transformation):
        self.value.append(("addComponent", (glyphName, transformation)))

    def replay(self, pen):
        """Replay the recorded calls onto another pen."""
        replayRecording(self.value, pen)
class DecomposingRecordingPen(DecomposingPen, RecordingPen):
""" Same as RecordingPen, except that it doesn't keep components
as references, but draws them decomposed as regular contours.
"""Same as RecordingPen, except that it doesn't keep components
as references, but draws them decomposed as regular contours.
The constructor takes a single 'glyphSet' positional argument,
a dictionary of glyph objects (i.e. with a 'draw' method) keyed
by thir name::
The constructor takes a single 'glyphSet' positional argument,
a dictionary of glyph objects (i.e. with a 'draw' method) keyed
by thir name::
>>> class SimpleGlyph(object):
... def draw(self, pen):
... pen.moveTo((0, 0))
... pen.curveTo((1, 1), (2, 2), (3, 3))
... pen.closePath()
>>> class CompositeGlyph(object):
... def draw(self, pen):
... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
>>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
>>> for name, glyph in sorted(glyphSet.items()):
... pen = DecomposingRecordingPen(glyphSet)
... glyph.draw(pen)
... print("{}: {}".format(name, pen.value))
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
"""
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
>>> class SimpleGlyph(object):
... def draw(self, pen):
... pen.moveTo((0, 0))
... pen.curveTo((1, 1), (2, 2), (3, 3))
... pen.closePath()
>>> class CompositeGlyph(object):
... def draw(self, pen):
... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
>>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
>>> for name, glyph in sorted(glyphSet.items()):
... pen = DecomposingRecordingPen(glyphSet)
... glyph.draw(pen)
... print("{}: {}".format(name, pen.value))
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
"""
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
class RecordingPointPen(AbstractPointPen):
"""PointPen recording operations that can be accessed or replayed.
"""PointPen recording operations that can be accessed or replayed.
The recording can be accessed as pen.value; or replayed using
pointPen.replay(otherPointPen).
The recording can be accessed as pen.value; or replayed using
pointPen.replay(otherPointPen).
:Example:
:Example:
from defcon import Font
from fontTools.pens.recordingPen import RecordingPointPen
from defcon import Font
from fontTools.pens.recordingPen import RecordingPointPen
glyph_name = 'a'
font_path = 'MyFont.ufo'
glyph_name = 'a'
font_path = 'MyFont.ufo'
font = Font(font_path)
glyph = font[glyph_name]
font = Font(font_path)
glyph = font[glyph_name]
pen = RecordingPointPen()
glyph.drawPoints(pen)
print(pen.value)
pen = RecordingPointPen()
glyph.drawPoints(pen)
print(pen.value)
new_glyph = font.newGlyph('b')
pen.replay(new_glyph.getPointPen())
"""
new_glyph = font.newGlyph('b')
pen.replay(new_glyph.getPointPen())
"""
def __init__(self):
self.value = []
def __init__(self):
self.value = []
def beginPath(self, identifier=None, **kwargs):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("beginPath", (), kwargs))
def beginPath(self, identifier=None, **kwargs):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("beginPath", (), kwargs))
def endPath(self):
self.value.append(("endPath", (), {}))
def endPath(self):
self.value.append(("endPath", (), {}))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
def addPoint(
self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
def replay(self, pointPen):
for operator, args, kwargs in self.value:
getattr(pointPen, operator)(*args, **kwargs)
def replay(self, pointPen):
for operator, args, kwargs in self.value:
getattr(pointPen, operator)(*args, **kwargs)
if __name__ == "__main__":
pen = RecordingPen()
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()
from pprint import pprint
pprint(pen.value)
pen = RecordingPen()
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()
from pprint import pprint
pprint(pen.value)

View File

@ -7,67 +7,74 @@ __all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
"""A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
"""A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
path = Path()
self.path = path
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
path = Path()
self.path = path
def _moveTo(self, p):
(x,y) = p
self.path.moveTo(x,y)
def _moveTo(self, p):
(x, y) = p
self.path.moveTo(x, y)
def _lineTo(self, p):
(x,y) = p
self.path.lineTo(x,y)
def _lineTo(self, p):
(x, y) = p
self.path.lineTo(x, y)
def _curveToOne(self, p1, p2, p3):
(x1,y1) = p1
(x2,y2) = p2
(x3,y3) = p3
self.path.curveTo(x1, y1, x2, y2, x3, y3)
def _curveToOne(self, p1, p2, p3):
(x1, y1) = p1
(x2, y2) = p2
(x3, y3) = p3
self.path.curveTo(x1, y1, x2, y2, x3, y3)
def _closePath(self):
self.path.closePath()
def _closePath(self):
self.path.closePath()
if __name__=="__main__":
import sys
if len(sys.argv) < 3:
print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
print(" If no image file name is created, by default <glyphname>.png is created.")
print(" example: reportLabPen.py Arial.TTF R test.png")
print(" (The file format will be PNG, regardless of the image file name supplied)")
sys.exit(0)
if __name__ == "__main__":
import sys
from fontTools.ttLib import TTFont
from reportlab.lib import colors
if len(sys.argv) < 3:
print(
"Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]"
)
print(
" If no image file name is created, by default <glyphname>.png is created."
)
print(" example: reportLabPen.py Arial.TTF R test.png")
print(
" (The file format will be PNG, regardless of the image file name supplied)"
)
sys.exit(0)
path = sys.argv[1]
glyphName = sys.argv[2]
if (len(sys.argv) > 3):
imageFile = sys.argv[3]
else:
imageFile = "%s.png" % glyphName
from fontTools.ttLib import TTFont
from reportlab.lib import colors
font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
gs = font.getGlyphSet()
pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
g = gs[glyphName]
g.draw(pen)
path = sys.argv[1]
glyphName = sys.argv[2]
if len(sys.argv) > 3:
imageFile = sys.argv[3]
else:
imageFile = "%s.png" % glyphName
w, h = g.width, 1000
from reportlab.graphics import renderPM
from reportlab.graphics.shapes import Group, Drawing, scale
font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
gs = font.getGlyphSet()
pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
g = gs[glyphName]
g.draw(pen)
# Everything is wrapped in a group to allow transformations.
g = Group(pen.path)
g.translate(0, 200)
g.scale(0.3, 0.3)
w, h = g.width, 1000
from reportlab.graphics import renderPM
from reportlab.graphics.shapes import Group, Drawing, scale
d = Drawing(w, h)
d.add(g)
# Everything is wrapped in a group to allow transformations.
g = Group(pen.path)
g.translate(0, 200)
g.scale(0.3, 0.3)
renderPM.drawToFile(d, imageFile, fmt="PNG")
d = Drawing(w, h)
d.add(g)
renderPM.drawToFile(d, imageFile, fmt="PNG")

View File

@ -23,7 +23,7 @@ class ReverseContourPen(ContourFilterPen):
def reversedContour(contour, outputImpliedClosingLine=False):
""" Generator that takes a list of pen's (operator, operands) tuples,
"""Generator that takes a list of pen's (operator, operands) tuples,
and yields them with the winding direction reversed.
"""
if not contour:
@ -40,16 +40,14 @@ def reversedContour(contour, outputImpliedClosingLine=False):
firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), (
"invalid initial segment type: %r" % firstType)
"invalid initial segment type: %r" % firstType
)
firstOnCurve = firstPts[-1]
if firstType == "qCurveTo":
# special case for TrueType paths contaning only off-curve points
assert firstOnCurve is None, (
"off-curve only paths must end with 'None'")
assert not contour, (
"only one qCurveTo allowed per off-curve path")
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
(None,))
assert firstOnCurve is None, "off-curve only paths must end with 'None'"
assert not contour, "only one qCurveTo allowed per off-curve path"
firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)
if not contour:
# contour contains only one segment, nothing to reverse
@ -67,8 +65,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
if outputImpliedClosingLine or firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,)
contour[-1] = (lastType,
tuple(lastPts[:-1]) + (firstOnCurve,))
contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1:
secondType, secondPts = contour[0]
@ -84,8 +81,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
if secondType == "lineTo" and firstPts != secondPts:
del contour[0]
if contour:
contour[-1] = (lastType,
tuple(lastPts[:-1]) + secondPts)
contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
else:
# for open paths, the last point will become the first
yield firstType, (lastOnCurve,)
@ -94,8 +90,7 @@ def reversedContour(contour, outputImpliedClosingLine=False):
# we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment
for (curType, curPts), (_, nextPts) in pairwise(
contour, reverse=True):
for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", ()

View File

@ -8,95 +8,115 @@ __all__ = ["StatisticsPen"]
class StatisticsPen(MomentsPen):
"""Pen calculating area, center of mass, variance and
standard-deviation, covariance and correlation, and slant,
of glyph shapes.
"""Pen calculating area, center of mass, variance and
standard-deviation, covariance and correlation, and slant,
of glyph shapes.
Note that all the calculated values are 'signed'. Ie. if the
glyph shape is self-intersecting, the values are not correct
(but well-defined). As such, area will be negative if contour
directions are clockwise. Moreover, variance might be negative
if the shapes are self-intersecting in certain ways."""
Note that all the calculated values are 'signed'. Ie. if the
glyph shape is self-intersecting, the values are not correct
(but well-defined). As such, area will be negative if contour
directions are clockwise. Moreover, variance might be negative
if the shapes are self-intersecting in certain ways."""
def __init__(self, glyphset=None):
MomentsPen.__init__(self, glyphset=glyphset)
self.__zero()
def __init__(self, glyphset=None):
MomentsPen.__init__(self, glyphset=glyphset)
self.__zero()
def _closePath(self):
MomentsPen._closePath(self)
self.__update()
def _closePath(self):
MomentsPen._closePath(self)
self.__update()
def __zero(self):
self.meanX = 0
self.meanY = 0
self.varianceX = 0
self.varianceY = 0
self.stddevX = 0
self.stddevY = 0
self.covariance = 0
self.correlation = 0
self.slant = 0
def __zero(self):
self.meanX = 0
self.meanY = 0
self.varianceX = 0
self.varianceY = 0
self.stddevX = 0
self.stddevY = 0
self.covariance = 0
self.correlation = 0
self.slant = 0
def __update(self):
def __update(self):
area = self.area
if not area:
self.__zero()
return
area = self.area
if not area:
self.__zero()
return
# Center of mass
# https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
self.meanX = meanX = self.momentX / area
self.meanY = meanY = self.momentY / area
# Center of mass
# https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
self.meanX = meanX = self.momentX / area
self.meanY = meanY = self.momentY / area
# Var(X) = E[X^2] - E[X]^2
self.varianceX = varianceX = self.momentXX / area - meanX**2
self.varianceY = varianceY = self.momentYY / area - meanY**2
# Var(X) = E[X^2] - E[X]^2
self.varianceX = varianceX = self.momentXX / area - meanX**2
self.varianceY = varianceY = self.momentYY / area - meanY**2
self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX)
self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY)
self.stddevX = stddevX = math.copysign(abs(varianceX) ** 0.5, varianceX)
self.stddevY = stddevY = math.copysign(abs(varianceY) ** 0.5, varianceY)
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
self.covariance = covariance = self.momentXY / area - meanX*meanY
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
self.covariance = covariance = self.momentXY / area - meanX * meanY
# Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) )
# https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
if stddevX * stddevY == 0:
correlation = float("NaN")
else:
correlation = covariance / (stddevX * stddevY)
self.correlation = correlation if abs(correlation) > 1e-3 else 0
# Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) )
# https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
if stddevX * stddevY == 0:
correlation = float("NaN")
else:
correlation = covariance / (stddevX * stddevY)
self.correlation = correlation if abs(correlation) > 1e-3 else 0
slant = covariance / varianceY if varianceY != 0 else float("NaN")
self.slant = slant if abs(slant) > 1e-3 else 0
slant = covariance / varianceY if varianceY != 0 else float("NaN")
self.slant = slant if abs(slant) > 1e-3 else 0
def _test(glyphset, upem, glyphs):
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Scale
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Scale
print('upem', upem)
print("upem", upem)
for glyph_name in glyphs:
print()
print("glyph:", glyph_name)
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
transformer = TransformPen(pen, Scale(1.0 / upem))
glyph.draw(transformer)
for item in [
"area",
"momentX",
"momentY",
"momentXX",
"momentYY",
"momentXY",
"meanX",
"meanY",
"varianceX",
"varianceY",
"stddevX",
"stddevY",
"covariance",
"correlation",
"slant",
]:
print("%s: %g" % (item, getattr(pen, item)))
for glyph_name in glyphs:
print()
print("glyph:", glyph_name)
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
transformer = TransformPen(pen, Scale(1./upem))
glyph.draw(transformer)
for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']:
print ("%s: %g" % (item, getattr(pen, item)))
def main(args):
if not args:
return
filename, glyphs = args[0], args[1:]
from fontTools.ttLib import TTFont
font = TTFont(filename)
if not glyphs:
glyphs = font.getGlyphOrder()
_test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs)
if not args:
return
filename, glyphs = args[0], args[1:]
from fontTools.ttLib import TTFont
if __name__ == '__main__':
import sys
main(sys.argv[1:])
font = TTFont(filename)
if not glyphs:
glyphs = font.getGlyphOrder()
_test(font.getGlyphSet(), font["head"].unitsPerEm, glyphs)
if __name__ == "__main__":
import sys
main(sys.argv[1:])

View File

@ -7,7 +7,7 @@ def pointToString(pt, ntos=str):
class SVGPathPen(BasePen):
""" Pen to draw SVG path d commands.
"""Pen to draw SVG path d commands.
Example::
>>> pen = SVGPathPen(None)
@ -36,6 +36,7 @@ class SVGPathPen(BasePen):
glyphset[glyphname].draw(pen)
print(tpen.getCommands())
"""
def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
BasePen.__init__(self, glyphSet)
self._commands = []
@ -209,22 +210,25 @@ def main(args=None):
if args is None:
import sys
args = sys.argv[1:]
from fontTools.ttLib import TTFont
import argparse
parser = argparse.ArgumentParser(
"fonttools pens.svgPathPen", description="Generate SVG from text")
"fonttools pens.svgPathPen", description="Generate SVG from text"
)
parser.add_argument("font", metavar="font.ttf", help="Font file.")
parser.add_argument("text", metavar="text", help="Text string.")
parser.add_argument(
"font", metavar="font.ttf", help="Font file.")
parser.add_argument(
"text", metavar="text", help="Text string.")
parser.add_argument(
"--variations", metavar="AXIS=LOC", default='',
"--variations",
metavar="AXIS=LOC",
default="",
help="List of space separated locations. A location consist in "
"the name of a variation axis, followed by '=' and a number. E.g.: "
"wght=700 wdth=80. The default is the location of the base master.")
"wght=700 wdth=80. The default is the location of the base master.",
)
options = parser.parse_args(args)
@ -233,18 +237,18 @@ def main(args=None):
location = {}
for tag_v in options.variations.split():
fields = tag_v.split('=')
fields = tag_v.split("=")
tag = fields[0].strip()
v = int(fields[1])
location[tag] = v
hhea = font['hhea']
hhea = font["hhea"]
ascent, descent = hhea.ascent, hhea.descent
glyphset = font.getGlyphSet(location=location)
cmap = font['cmap'].getBestCmap()
cmap = font["cmap"].getBestCmap()
s = ''
s = ""
width = 0
for u in text:
g = cmap[ord(u)]
@ -254,20 +258,29 @@ def main(args=None):
glyph.draw(pen)
commands = pen.getCommands()
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (width, ascent, commands)
s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (
width,
ascent,
commands,
)
width += glyph.width
print('<?xml version="1.0" encoding="UTF-8"?>')
print('<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % (width, ascent-descent))
print(s, end='')
print('</svg>')
print(
'<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">'
% (width, ascent - descent)
)
print(s, end="")
print("</svg>")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
sys.exit(main())

View File

@ -24,22 +24,22 @@ class T2CharStringPen(BasePen):
self._CFF2 = CFF2
self._width = width
self._commands = []
self._p0 = (0,0)
self._p0 = (0, 0)
def _p(self, pt):
p0 = self._p0
pt = self._p0 = (self.round(pt[0]), self.round(pt[1]))
return [pt[0]-p0[0], pt[1]-p0[1]]
return [pt[0] - p0[0], pt[1] - p0[1]]
def _moveTo(self, pt):
self._commands.append(('rmoveto', self._p(pt)))
self._commands.append(("rmoveto", self._p(pt)))
def _lineTo(self, pt):
self._commands.append(('rlineto', self._p(pt)))
self._commands.append(("rlineto", self._p(pt)))
def _curveToOne(self, pt1, pt2, pt3):
_p = self._p
self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3)))
self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3)))
def _closePath(self):
pass
@ -51,15 +51,18 @@ class T2CharStringPen(BasePen):
commands = self._commands
if optimize:
maxstack = 48 if not self._CFF2 else 513
commands = specializeCommands(commands,
generalizeFirst=False,
maxstack=maxstack)
commands = specializeCommands(
commands, generalizeFirst=False, maxstack=maxstack
)
program = commandsToProgram(commands)
if self._width is not None:
assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString."
assert (
not self._CFF2
), "CFF2 does not allow encoding glyph width in CharString."
program.insert(0, otRound(self._width))
if not self._CFF2:
program.append('endchar')
program.append("endchar")
charString = T2CharString(
program=program, private=private, globalSubrs=globalSubrs)
program=program, private=private, globalSubrs=globalSubrs
)
return charString

View File

@ -6,41 +6,49 @@ __all__ = ["TeePen"]
class TeePen(AbstractPen):
"""Pen multiplexing drawing to one or more pens.
"""Pen multiplexing drawing to one or more pens.
Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""
Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""
def __init__(self, *pens):
if len(pens) == 1:
pens = pens[0]
self.pens = pens
def moveTo(self, p0):
for pen in self.pens:
pen.moveTo(p0)
def lineTo(self, p1):
for pen in self.pens:
pen.lineTo(p1)
def qCurveTo(self, *points):
for pen in self.pens:
pen.qCurveTo(*points)
def curveTo(self, *points):
for pen in self.pens:
pen.curveTo(*points)
def closePath(self):
for pen in self.pens:
pen.closePath()
def endPath(self):
for pen in self.pens:
pen.endPath()
def addComponent(self, glyphName, transformation):
for pen in self.pens:
pen.addComponent(glyphName, transformation)
def __init__(self, *pens):
if len(pens) == 1:
pens = pens[0]
self.pens = pens
def moveTo(self, p0):
for pen in self.pens:
pen.moveTo(p0)
def lineTo(self, p1):
for pen in self.pens:
pen.lineTo(p1)
def qCurveTo(self, *points):
for pen in self.pens:
pen.qCurveTo(*points)
def curveTo(self, *points):
for pen in self.pens:
pen.curveTo(*points)
def closePath(self):
for pen in self.pens:
pen.closePath()
def endPath(self):
for pen in self.pens:
pen.endPath()
def addComponent(self, glyphName, transformation):
for pen in self.pens:
pen.addComponent(glyphName, transformation)
if __name__ == "__main__":
from fontTools.pens.basePen import _TestPen
pen = TeePen(_TestPen(), _TestPen())
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()
from fontTools.pens.basePen import _TestPen
pen = TeePen(_TestPen(), _TestPen())
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()

View File

@ -6,103 +6,106 @@ __all__ = ["TransformPen", "TransformPointPen"]
class TransformPen(FilterPen):
"""Pen that transforms all coordinates using a Affine transformation,
and passes them to another pen.
"""
"""Pen that transforms all coordinates using a Affine transformation,
and passes them to another pen.
"""
def __init__(self, outPen, transformation):
"""The 'outPen' argument is another pen object. It will receive the
transformed coordinates. The 'transformation' argument can either
be a six-tuple, or a fontTools.misc.transform.Transform object.
"""
super(TransformPen, self).__init__(outPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
self._stack = []
def __init__(self, outPen, transformation):
"""The 'outPen' argument is another pen object. It will receive the
transformed coordinates. The 'transformation' argument can either
be a six-tuple, or a fontTools.misc.transform.Transform object.
"""
super(TransformPen, self).__init__(outPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
def moveTo(self, pt):
self._outPen.moveTo(self._transformPoint(pt))
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
self._stack = []
def lineTo(self, pt):
self._outPen.lineTo(self._transformPoint(pt))
def moveTo(self, pt):
self._outPen.moveTo(self._transformPoint(pt))
def curveTo(self, *points):
self._outPen.curveTo(*self._transformPoints(points))
def lineTo(self, pt):
self._outPen.lineTo(self._transformPoint(pt))
def qCurveTo(self, *points):
if points[-1] is None:
points = self._transformPoints(points[:-1]) + [None]
else:
points = self._transformPoints(points)
self._outPen.qCurveTo(*points)
def curveTo(self, *points):
self._outPen.curveTo(*self._transformPoints(points))
def _transformPoints(self, points):
transformPoint = self._transformPoint
return [transformPoint(pt) for pt in points]
def qCurveTo(self, *points):
if points[-1] is None:
points = self._transformPoints(points[:-1]) + [None]
else:
points = self._transformPoints(points)
self._outPen.qCurveTo(*points)
def closePath(self):
self._outPen.closePath()
def _transformPoints(self, points):
transformPoint = self._transformPoint
return [transformPoint(pt) for pt in points]
def endPath(self):
self._outPen.endPath()
def closePath(self):
self._outPen.closePath()
def addComponent(self, glyphName, transformation):
transformation = self._transformation.transform(transformation)
self._outPen.addComponent(glyphName, transformation)
def endPath(self):
self._outPen.endPath()
def addComponent(self, glyphName, transformation):
transformation = self._transformation.transform(transformation)
self._outPen.addComponent(glyphName, transformation)
class TransformPointPen(FilterPointPen):
"""PointPen that transforms all coordinates using a Affine transformation,
and passes them to another PointPen.
"""PointPen that transforms all coordinates using a Affine transformation,
and passes them to another PointPen.
>>> from fontTools.pens.recordingPen import RecordingPointPen
>>> rec = RecordingPointPen()
>>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
>>> v = iter(rec.value)
>>> pen.beginPath(identifier="contour-0")
>>> next(v)
('beginPath', (), {'identifier': 'contour-0'})
>>> pen.addPoint((100, 100), "line")
>>> next(v)
('addPoint', ((190, 205), 'line', False, None), {})
>>> pen.endPath()
>>> next(v)
('endPath', (), {})
>>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
>>> next(v)
('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
"""
>>> from fontTools.pens.recordingPen import RecordingPointPen
>>> rec = RecordingPointPen()
>>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
>>> v = iter(rec.value)
>>> pen.beginPath(identifier="contour-0")
>>> next(v)
('beginPath', (), {'identifier': 'contour-0'})
>>> pen.addPoint((100, 100), "line")
>>> next(v)
('addPoint', ((190, 205), 'line', False, None), {})
>>> pen.endPath()
>>> next(v)
('endPath', (), {})
>>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
>>> next(v)
('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
"""
def __init__(self, outPointPen, transformation):
"""The 'outPointPen' argument is another point pen object.
It will receive the transformed coordinates.
The 'transformation' argument can either be a six-tuple, or a
fontTools.misc.transform.Transform object.
"""
super().__init__(outPointPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
def __init__(self, outPointPen, transformation):
"""The 'outPointPen' argument is another point pen object.
It will receive the transformed coordinates.
The 'transformation' argument can either be a six-tuple, or a
fontTools.misc.transform.Transform object.
"""
super().__init__(outPointPen)
if not hasattr(transformation, "transformPoint"):
from fontTools.misc.transform import Transform
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
self._outPen.addPoint(
self._transformPoint(pt), segmentType, smooth, name, **kwargs
)
transformation = Transform(*transformation)
self._transformation = transformation
self._transformPoint = transformation.transformPoint
def addComponent(self, baseGlyphName, transformation, **kwargs):
transformation = self._transformation.transform(transformation)
self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
self._outPen.addPoint(
self._transformPoint(pt), segmentType, smooth, name, **kwargs
)
def addComponent(self, baseGlyphName, transformation, **kwargs):
transformation = self._transformation.transform(transformation)
self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
if __name__ == "__main__":
from fontTools.pens.basePen import _TestPen
pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
pen.closePath()
from fontTools.pens.basePen import _TestPen
pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
pen.closePath()

View File

@ -5,25 +5,25 @@ __all__ = ["WxPen"]
class WxPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
import wx
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
import wx
path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
self.path = path
path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
self.path = path
def _moveTo(self, p):
self.path.MoveToPoint(*p)
def _moveTo(self, p):
self.path.MoveToPoint(*p)
def _lineTo(self, p):
self.path.AddLineToPoint(*p)
def _lineTo(self, p):
self.path.AddLineToPoint(*p)
def _curveToOne(self, p1, p2, p3):
self.path.AddCurveToPoint(*p1+p2+p3)
def _curveToOne(self, p1, p2, p3):
self.path.AddCurveToPoint(*p1 + p2 + p3)
def _qCurveToOne(self, p1, p2):
self.path.AddQuadCurveToPoint(*p1+p2)
def _qCurveToOne(self, p1, p2):
self.path.AddQuadCurveToPoint(*p1 + p2)
def _closePath(self):
self.path.CloseSubpath()
def _closePath(self):
self.path.CloseSubpath()

File diff suppressed because it is too large Load Diff

View File

@ -2,5 +2,5 @@ import sys
from fontTools.subset import main
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@ -7,496 +7,530 @@ from fontTools.subset.util import _add_method, _uniq_sort
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
def __init__(self, components, localSubrs, globalSubrs):
psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
self.components = components
def __init__(self, components, localSubrs, globalSubrs):
psCharStrings.SimpleT2Decompiler.__init__(self,
localSubrs,
globalSubrs)
self.components = components
def op_endchar(self, index):
args = self.popall()
if len(args) >= 4:
from fontTools.encodings.StandardEncoding import StandardEncoding
def op_endchar(self, index):
args = self.popall()
if len(args) >= 4:
from fontTools.encodings.StandardEncoding import StandardEncoding
# endchar can do seac accent bulding; The T2 spec says it's deprecated,
# but recent software that shall remain nameless does output it.
adx, ady, bchar, achar = args[-4:]
baseGlyph = StandardEncoding[bchar]
accentGlyph = StandardEncoding[achar]
self.components.add(baseGlyph)
self.components.add(accentGlyph)
# endchar can do seac accent bulding; The T2 spec says it's deprecated,
# but recent software that shall remain nameless does output it.
adx, ady, bchar, achar = args[-4:]
baseGlyph = StandardEncoding[bchar]
accentGlyph = StandardEncoding[achar]
self.components.add(baseGlyph)
self.components.add(accentGlyph)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def closure_glyphs(self, s):
cff = self.cff
assert len(cff) == 1
font = cff[cff.keys()[0]]
glyphSet = font.CharStrings
cff = self.cff
assert len(cff) == 1
font = cff[cff.keys()[0]]
glyphSet = font.CharStrings
decompose = s.glyphs
while decompose:
components = set()
for g in decompose:
if g not in glyphSet:
continue
gl = glyphSet[g]
decompose = s.glyphs
while decompose:
components = set()
for g in decompose:
if g not in glyphSet:
continue
gl = glyphSet[g]
subrs = getattr(gl.private, "Subrs", [])
decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
decompiler.execute(gl)
components -= s.glyphs
s.glyphs.update(components)
decompose = components
subrs = getattr(gl.private, "Subrs", [])
decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
decompiler.execute(gl)
components -= s.glyphs
s.glyphs.update(components)
decompose = components
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
if isCFF2 or ignoreWidth:
# CFF2 charstrings have no widths nor 'endchar' operators
c.setProgram([] if isCFF2 else ['endchar'])
else:
if hasattr(font, 'FDArray') and font.FDArray is not None:
private = font.FDArray[fdSelectIndex].Private
else:
private = font.Private
dfltWdX = private.defaultWidthX
nmnlWdX = private.nominalWidthX
pen = NullPen()
c.draw(pen) # this will set the charstring's width
if c.width != dfltWdX:
c.program = [c.width - nmnlWdX, 'endchar']
else:
c.program = ['endchar']
c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
if isCFF2 or ignoreWidth:
# CFF2 charstrings have no widths nor 'endchar' operators
c.setProgram([] if isCFF2 else ["endchar"])
else:
if hasattr(font, "FDArray") and font.FDArray is not None:
private = font.FDArray[fdSelectIndex].Private
else:
private = font.Private
dfltWdX = private.defaultWidthX
nmnlWdX = private.nominalWidthX
pen = NullPen()
c.draw(pen) # this will set the charstring's width
if c.width != dfltWdX:
c.program = [c.width - nmnlWdX, "endchar"]
else:
c.program = ["endchar"]
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def prune_pre_subset(self, font, options):
cff = self.cff
# CFF table must have one font only
cff.fontNames = cff.fontNames[:1]
cff = self.cff
# CFF table must have one font only
cff.fontNames = cff.fontNames[:1]
if options.notdef_glyph and not options.notdef_outline:
isCFF2 = cff.major > 1
for fontname in cff.keys():
font = cff[fontname]
_empty_charstring(font, ".notdef", isCFF2=isCFF2)
if options.notdef_glyph and not options.notdef_outline:
isCFF2 = cff.major > 1
for fontname in cff.keys():
font = cff[fontname]
_empty_charstring(font, ".notdef", isCFF2=isCFF2)
# Clear useless Encoding
for fontname in cff.keys():
font = cff[fontname]
# https://github.com/fonttools/fonttools/issues/620
font.Encoding = "StandardEncoding"
# Clear useless Encoding
for fontname in cff.keys():
font = cff[fontname]
# https://github.com/fonttools/fonttools/issues/620
font.Encoding = "StandardEncoding"
return True # bool(cff.fontNames)
return True # bool(cff.fontNames)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def subset_glyphs(self, s):
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
glyphs = s.glyphs.union(s.glyphs_emptied)
glyphs = s.glyphs.union(s.glyphs_emptied)
# Load all glyphs
for g in font.charset:
if g not in glyphs: continue
c, _ = cs.getItemAndSelector(g)
# Load all glyphs
for g in font.charset:
if g not in glyphs:
continue
c, _ = cs.getItemAndSelector(g)
if cs.charStringsAreIndexed:
indices = [i for i,g in enumerate(font.charset) if g in glyphs]
csi = cs.charStringsIndex
csi.items = [csi.items[i] for i in indices]
del csi.file, csi.offsets
if hasattr(font, "FDSelect"):
sel = font.FDSelect
sel.format = None
sel.gidArray = [sel.gidArray[i] for i in indices]
newCharStrings = {}
for indicesIdx, charsetIdx in enumerate(indices):
g = font.charset[charsetIdx]
if g in cs.charStrings:
newCharStrings[g] = indicesIdx
cs.charStrings = newCharStrings
else:
cs.charStrings = {g:v
for g,v in cs.charStrings.items()
if g in glyphs}
font.charset = [g for g in font.charset if g in glyphs]
font.numGlyphs = len(font.charset)
if cs.charStringsAreIndexed:
indices = [i for i, g in enumerate(font.charset) if g in glyphs]
csi = cs.charStringsIndex
csi.items = [csi.items[i] for i in indices]
del csi.file, csi.offsets
if hasattr(font, "FDSelect"):
sel = font.FDSelect
sel.format = None
sel.gidArray = [sel.gidArray[i] for i in indices]
newCharStrings = {}
for indicesIdx, charsetIdx in enumerate(indices):
g = font.charset[charsetIdx]
if g in cs.charStrings:
newCharStrings[g] = indicesIdx
cs.charStrings = newCharStrings
else:
cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
font.charset = [g for g in font.charset if g in glyphs]
font.numGlyphs = len(font.charset)
if s.options.retain_gids:
isCFF2 = cff.major > 1
for g in s.glyphs_emptied:
_empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
if s.options.retain_gids:
isCFF2 = cff.major > 1
for g in s.glyphs_emptied:
_empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
p = self.program
for i in range(1, len(p)):
if p[i] == 'callsubr':
assert isinstance(p[i-1], int)
p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias
elif p[i] == 'callgsubr':
assert isinstance(p[i-1], int)
p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
p = self.program
for i in range(1, len(p)):
if p[i] == "callsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
elif p[i] == "callgsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = (
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
)
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
hints = self._hints
hints = self._hints
if hints.deletions:
p = self.program
for idx in reversed(hints.deletions):
del p[idx-2:idx]
if hints.deletions:
p = self.program
for idx in reversed(hints.deletions):
del p[idx - 2 : idx]
if hints.has_hint:
assert not hints.deletions or hints.last_hint <= hints.deletions[0]
self.program = self.program[hints.last_hint:]
if not self.program:
# TODO CFF2 no need for endchar.
self.program.append('endchar')
if hasattr(self, 'width'):
# Insert width back if needed
if self.width != self.private.defaultWidthX:
# For CFF2 charstrings, this should never happen
assert self.private.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
self.program.insert(0, self.width - self.private.nominalWidthX)
if hints.has_hint:
assert not hints.deletions or hints.last_hint <= hints.deletions[0]
self.program = self.program[hints.last_hint :]
if not self.program:
# TODO CFF2 no need for endchar.
self.program.append("endchar")
if hasattr(self, "width"):
# Insert width back if needed
if self.width != self.private.defaultWidthX:
# For CFF2 charstrings, this should never happen
assert (
self.private.defaultWidthX is not None
), "CFF2 CharStrings must not have an initial width value"
self.program.insert(0, self.width - self.private.nominalWidthX)
if hints.has_hintmask:
i = 0
p = self.program
while i < len(p):
if p[i] in ['hintmask', 'cntrmask']:
assert i + 1 <= len(p)
del p[i:i+2]
continue
i += 1
if hints.has_hintmask:
i = 0
p = self.program
while i < len(p):
if p[i] in ["hintmask", "cntrmask"]:
assert i + 1 <= len(p)
del p[i : i + 2]
continue
i += 1
assert len(self.program)
assert len(self.program)
del self._hints
del self._hints
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
def __init__(self, localSubrs, globalSubrs, private):
psCharStrings.SimpleT2Decompiler.__init__(
self, localSubrs, globalSubrs, private
)
for subrs in [localSubrs, globalSubrs]:
if subrs and not hasattr(subrs, "_used"):
subrs._used = set()
def __init__(self, localSubrs, globalSubrs, private):
psCharStrings.SimpleT2Decompiler.__init__(self,
localSubrs,
globalSubrs,
private)
for subrs in [localSubrs, globalSubrs]:
if subrs and not hasattr(subrs, "_used"):
subrs._used = set()
def op_callsubr(self, index):
self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
def op_callsubr(self, index):
self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
def op_callgsubr(self, index):
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
def op_callgsubr(self, index):
self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
class Hints(object):
def __init__(self):
# Whether calling this charstring produces any hint stems
# Note that if a charstring starts with hintmask, it will
# have has_hint set to True, because it *might* produce an
# implicit vstem if called under certain conditions.
self.has_hint = False
# Index to start at to drop all hints
self.last_hint = 0
# Index up to which we know more hints are possible.
# Only relevant if status is 0 or 1.
self.last_checked = 0
# The status means:
# 0: after dropping hints, this charstring is empty
# 1: after dropping hints, there may be more hints
# continuing after this, or there might be
# other things. Not clear yet.
# 2: no more hints possible after this charstring
self.status = 0
# Has hintmask instructions; not recursive
self.has_hintmask = False
# List of indices of calls to empty subroutines to remove.
self.deletions = []
class Hints(object):
def __init__(self):
# Whether calling this charstring produces any hint stems
# Note that if a charstring starts with hintmask, it will
# have has_hint set to True, because it *might* produce an
# implicit vstem if called under certain conditions.
self.has_hint = False
# Index to start at to drop all hints
self.last_hint = 0
# Index up to which we know more hints are possible.
# Only relevant if status is 0 or 1.
self.last_checked = 0
# The status means:
# 0: after dropping hints, this charstring is empty
# 1: after dropping hints, there may be more hints
# continuing after this, or there might be
# other things. Not clear yet.
# 2: no more hints possible after this charstring
self.status = 0
# Has hintmask instructions; not recursive
self.has_hintmask = False
# List of indices of calls to empty subroutines to remove.
self.deletions = []
pass
pass
def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
self._css = css
psCharStrings.T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)
self.private = private
def __init__(
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
):
self._css = css
psCharStrings.T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
)
self.private = private
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, '_hints') else None
charString._hints = self.Hints()
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, "_hints") else None
charString._hints = self.Hints()
psCharStrings.T2WidthExtractor.execute(self, charString)
psCharStrings.T2WidthExtractor.execute(self, charString)
hints = charString._hints
hints = charString._hints
if hints.has_hint or hints.has_hintmask:
self._css.add(charString)
if hints.has_hint or hints.has_hintmask:
self._css.add(charString)
if hints.status != 2:
# Check from last_check, make sure we didn't have any operators.
for i in range(hints.last_checked, len(charString.program) - 1):
if isinstance(charString.program[i], str):
hints.status = 2
break
else:
hints.status = 1 # There's *something* here
hints.last_checked = len(charString.program)
if hints.status != 2:
# Check from last_check, make sure we didn't have any operators.
for i in range(hints.last_checked, len(charString.program) - 1):
if isinstance(charString.program[i], str):
hints.status = 2
break
else:
hints.status = 1 # There's *something* here
hints.last_checked = len(charString.program)
if old_hints:
assert hints.__dict__ == old_hints.__dict__
if old_hints:
assert hints.__dict__ == old_hints.__dict__
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1]+self.localBias]
psCharStrings.T2WidthExtractor.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
psCharStrings.T2WidthExtractor.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
self.processSubr(index, subr)
def op_hstem(self, index):
psCharStrings.T2WidthExtractor.op_hstem(self, index)
self.processHint(index)
def op_vstem(self, index):
psCharStrings.T2WidthExtractor.op_vstem(self, index)
self.processHint(index)
def op_hstemhm(self, index):
psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
self.processHint(index)
def op_vstemhm(self, index):
psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
self.processHint(index)
def op_hintmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
self.processHintmask(index)
return rv
def op_cntrmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
self.processHintmask(index)
return rv
def op_hstem(self, index):
psCharStrings.T2WidthExtractor.op_hstem(self, index)
self.processHint(index)
def processHintmask(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hintmask = True
if hints.status != 2:
# Check from last_check, see if we may be an implicit vstem
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
else:
# We are an implicit vstem
hints.has_hint = True
hints.last_hint = index + 1
hints.status = 0
hints.last_checked = index + 1
def op_vstem(self, index):
psCharStrings.T2WidthExtractor.op_vstem(self, index)
self.processHint(index)
def processHint(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hint = True
hints.last_hint = index
hints.last_checked = index
def op_hstemhm(self, index):
psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
self.processHint(index)
def processSubr(self, index, subr):
cs = self.callingStack[-1]
hints = cs._hints
subr_hints = subr._hints
def op_vstemhm(self, index):
psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
self.processHint(index)
# Check from last_check, make sure we didn't have
# any operators.
if hints.status != 2:
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
hints.last_checked = index
def op_hintmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
self.processHintmask(index)
return rv
if hints.status != 2:
if subr_hints.has_hint:
hints.has_hint = True
def op_cntrmask(self, index):
rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
self.processHintmask(index)
return rv
# Decide where to chop off from
if subr_hints.status == 0:
hints.last_hint = index
else:
hints.last_hint = index - 2 # Leave the subr call in
def processHintmask(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hintmask = True
if hints.status != 2:
# Check from last_check, see if we may be an implicit vstem
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
else:
# We are an implicit vstem
hints.has_hint = True
hints.last_hint = index + 1
hints.status = 0
hints.last_checked = index + 1
elif subr_hints.status == 0:
hints.deletions.append(index)
def processHint(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hint = True
hints.last_hint = index
hints.last_checked = index
hints.status = max(hints.status, subr_hints.status)
def processSubr(self, index, subr):
cs = self.callingStack[-1]
hints = cs._hints
subr_hints = subr._hints
# Check from last_check, make sure we didn't have
# any operators.
if hints.status != 2:
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
hints.last_checked = index
if hints.status != 2:
if subr_hints.has_hint:
hints.has_hint = True
# Decide where to chop off from
if subr_hints.status == 0:
hints.last_hint = index
else:
hints.last_hint = index - 2 # Leave the subr call in
elif subr_hints.status == 0:
hints.deletions.append(index)
hints.status = max(hints.status, subr_hints.status)
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def prune_post_subset(self, ttfFont, options):
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# Drop unused FontDictionaries
if hasattr(font, "FDSelect"):
sel = font.FDSelect
indices = _uniq_sort(sel.gidArray)
sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
arr = font.FDArray
arr.items = [arr[i] for i in indices]
del arr.file, arr.offsets
# Drop unused FontDictionaries
if hasattr(font, "FDSelect"):
sel = font.FDSelect
indices = _uniq_sort(sel.gidArray)
sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
arr = font.FDArray
arr.items = [arr[i] for i in indices]
del arr.file, arr.offsets
# Desubroutinize if asked for
if options.desubroutinize:
cff.desubroutinize()
# Desubroutinize if asked for
if options.desubroutinize:
cff.desubroutinize()
# Drop hints if not needed
if not options.hinting:
self.remove_hints()
elif not options.desubroutinize:
self.remove_unused_subroutines()
return True
# Drop hints if not needed
if not options.hinting:
self.remove_hints()
elif not options.desubroutinize:
self.remove_unused_subroutines()
return True
def _delete_empty_subrs(private_dict):
if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
if 'Subrs' in private_dict.rawDict:
del private_dict.rawDict['Subrs']
del private_dict.Subrs
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
if "Subrs" in private_dict.rawDict:
del private_dict.rawDict["Subrs"]
del private_dict.Subrs
@deprecateFunction("use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning)
@_add_method(ttLib.getTableClass('CFF '))
@deprecateFunction(
"use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
)
@_add_method(ttLib.getTableClass("CFF "))
def desubroutinize(self):
self.cff.desubroutinize()
self.cff.desubroutinize()
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def remove_hints(self):
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# This can be tricky, but doesn't have to. What we do is:
#
# - Run all used glyph charstrings and recurse into subroutines,
# - For each charstring (including subroutines), if it has any
# of the hint stem operators, we mark it as such.
# Upon returning, for each charstring we note all the
# subroutine calls it makes that (recursively) contain a stem,
# - Dropping hinting then consists of the following two ops:
# * Drop the piece of the program in each charstring before the
# last call to a stem op or a stem-calling subroutine,
# * Drop all hintmask operations.
# - It's trickier... A hintmask right after hints and a few numbers
# will act as an implicit vstemhm. As such, we track whether
# we have seen any non-hint operators so far and do the right
# thing, recursively... Good luck understanding that :(
css = set()
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
c.private.nominalWidthX,
c.private.defaultWidthX,
c.private)
decompiler.execute(c)
c.width = decompiler.width
for charstring in css:
charstring.drop_hints()
del css
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# This can be tricky, but doesn't have to. What we do is:
#
# - Run all used glyph charstrings and recurse into subroutines,
# - For each charstring (including subroutines), if it has any
# of the hint stem operators, we mark it as such.
# Upon returning, for each charstring we note all the
# subroutine calls it makes that (recursively) contain a stem,
# - Dropping hinting then consists of the following two ops:
# * Drop the piece of the program in each charstring before the
# last call to a stem op or a stem-calling subroutine,
# * Drop all hintmask operations.
# - It's trickier... A hintmask right after hints and a few numbers
# will act as an implicit vstemhm. As such, we track whether
# we have seen any non-hint operators so far and do the right
# thing, recursively... Good luck understanding that :(
css = set()
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DehintingT2Decompiler(
css,
subrs,
c.globalSubrs,
c.private.nominalWidthX,
c.private.defaultWidthX,
c.private,
)
decompiler.execute(c)
c.width = decompiler.width
for charstring in css:
charstring.drop_hints()
del css
# Drop font-wide hinting values
all_privs = []
if hasattr(font, 'FDArray'):
all_privs.extend(fd.Private for fd in font.FDArray)
else:
all_privs.append(font.Private)
for priv in all_privs:
for k in ['BlueValues', 'OtherBlues',
'FamilyBlues', 'FamilyOtherBlues',
'BlueScale', 'BlueShift', 'BlueFuzz',
'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
if hasattr(priv, k):
setattr(priv, k, None)
self.remove_unused_subroutines()
# Drop font-wide hinting values
all_privs = []
if hasattr(font, "FDArray"):
all_privs.extend(fd.Private for fd in font.FDArray)
else:
all_privs.append(font.Private)
for priv in all_privs:
for k in [
"BlueValues",
"OtherBlues",
"FamilyBlues",
"FamilyOtherBlues",
"BlueScale",
"BlueShift",
"BlueFuzz",
"StemSnapH",
"StemSnapV",
"StdHW",
"StdVW",
"ForceBold",
"LanguageGroup",
"ExpansionFactor",
]:
if hasattr(priv, k):
setattr(priv, k, None)
self.remove_unused_subroutines()
@_add_method(ttLib.getTableClass('CFF '))
@_add_method(ttLib.getTableClass("CFF "))
def remove_unused_subroutines(self):
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# Renumber subroutines to remove unused ones
cff = self.cff
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# Renumber subroutines to remove unused ones
# Mark all used subroutines
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
subrs = getattr(c.private, "Subrs", [])
decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
# Mark all used subroutines
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
subrs = getattr(c.private, "Subrs", [])
decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
all_subrs = [font.GlobalSubrs]
if hasattr(font, 'FDArray'):
all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
all_subrs.append(font.Private.Subrs)
all_subrs = [font.GlobalSubrs]
if hasattr(font, "FDArray"):
all_subrs.extend(
fd.Private.Subrs
for fd in font.FDArray
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
)
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
all_subrs.append(font.Private.Subrs)
subrs = set(subrs) # Remove duplicates
subrs = set(subrs) # Remove duplicates
# Prepare
for subrs in all_subrs:
if not hasattr(subrs, '_used'):
subrs._used = set()
subrs._used = _uniq_sort(subrs._used)
subrs._old_bias = psCharStrings.calcSubrBias(subrs)
subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
# Prepare
for subrs in all_subrs:
if not hasattr(subrs, "_used"):
subrs._used = set()
subrs._used = _uniq_sort(subrs._used)
subrs._old_bias = psCharStrings.calcSubrBias(subrs)
subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
# Renumber glyph charstrings
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
subrs = getattr(c.private, "Subrs", [])
c.subset_subroutines (subrs, font.GlobalSubrs)
# Renumber glyph charstrings
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
subrs = getattr(c.private, "Subrs", [])
c.subset_subroutines(subrs, font.GlobalSubrs)
# Renumber subroutines themselves
for subrs in all_subrs:
if subrs == font.GlobalSubrs:
if not hasattr(font, 'FDArray') and hasattr(font.Private, 'Subrs'):
local_subrs = font.Private.Subrs
else:
local_subrs = []
else:
local_subrs = subrs
# Renumber subroutines themselves
for subrs in all_subrs:
if subrs == font.GlobalSubrs:
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
local_subrs = font.Private.Subrs
else:
local_subrs = []
else:
local_subrs = subrs
subrs.items = [subrs.items[i] for i in subrs._used]
if hasattr(subrs, 'file'):
del subrs.file
if hasattr(subrs, 'offsets'):
del subrs.offsets
subrs.items = [subrs.items[i] for i in subrs._used]
if hasattr(subrs, "file"):
del subrs.file
if hasattr(subrs, "offsets"):
del subrs.offsets
for subr in subrs.items:
subr.subset_subroutines (local_subrs, font.GlobalSubrs)
for subr in subrs.items:
subr.subset_subroutines(local_subrs, font.GlobalSubrs)
# Delete local SubrsIndex if empty
if hasattr(font, 'FDArray'):
for fd in font.FDArray:
_delete_empty_subrs(fd.Private)
else:
_delete_empty_subrs(font.Private)
# Delete local SubrsIndex if empty
if hasattr(font, "FDArray"):
for fd in font.FDArray:
_delete_empty_subrs(fd.Private)
else:
_delete_empty_subrs(font.Private)
# Cleanup
for subrs in all_subrs:
del subrs._used, subrs._old_bias, subrs._new_bias
# Cleanup
for subrs in all_subrs:
del subrs._used, subrs._old_bias, subrs._new_bias

View File

@ -9,7 +9,7 @@ __all__ = [tostr(s) for s in ("SVGPath", "parse_path")]
class SVGPath(object):
""" Parse SVG ``path`` elements from a file or string, and draw them
"""Parse SVG ``path`` elements from a file or string, and draw them
onto a glyph object that supports the FontTools Pen protocol.
For example, reading from an SVG file and drawing to a Defcon Glyph:

View File

@ -19,7 +19,6 @@ def _map_point(matrix, pt):
class EllipticalArc(object):
def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point):
self.current_point = current_point
self.rx = rx

View File

@ -11,9 +11,9 @@ from .arc import EllipticalArc
import re
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
COMMANDS = set("MmZzLlHhVvCcSsQqTtAa")
ARC_COMMANDS = set("Aa")
UPPERCASE = set('MZLHVCSQTA')
UPPERCASE = set("MZLHVCSQTA")
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
@ -93,7 +93,7 @@ def _tokenize_arc_arguments(arcdef):
def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
""" Parse SVG path definition (i.e. "d" attribute of <path> elements)
"""Parse SVG path definition (i.e. "d" attribute of <path> elements)
and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath
methods.
@ -136,11 +136,13 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
raise ValueError(
"Unallowed implicit command in %s, position %s"
% (pathdef, len(pathdef.split()) - len(elements))
)
last_command = command # Used by S and T
if command == 'M':
if command == "M":
# Moveto command.
x = elements.pop()
y = elements.pop()
@ -164,9 +166,9 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
command = "L"
elif command == 'Z':
elif command == "Z":
# Close path
if current_pos != start_pos:
pen.lineTo((start_pos.real, start_pos.imag))
@ -175,7 +177,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
start_pos = None
command = None # You can't have implicit commands after closing.
elif command == 'L':
elif command == "L":
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
@ -184,7 +186,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'H':
elif command == "H":
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
@ -192,7 +194,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'V':
elif command == "V":
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
@ -200,7 +202,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
pen.lineTo((pos.real, pos.imag))
current_pos = pos
elif command == 'C':
elif command == "C":
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@ -210,17 +212,19 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
pen.curveTo((control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag))
pen.curveTo(
(control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag),
)
current_pos = end
last_control = control2
elif command == 'S':
elif command == "S":
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
if last_command not in "CS":
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
@ -238,13 +242,15 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
control2 += current_pos
end += current_pos
pen.curveTo((control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag))
pen.curveTo(
(control1.real, control1.imag),
(control2.real, control2.imag),
(end.real, end.imag),
)
current_pos = end
last_control = control2
elif command == 'Q':
elif command == "Q":
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
@ -256,11 +262,11 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
elif command == 'T':
elif command == "T":
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
if last_command not in "QT":
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
@ -280,7 +286,7 @@ def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc):
current_pos = end
last_control = control
elif command == 'A':
elif command == "A":
rx = abs(float(elements.pop()))
ry = abs(float(elements.pop()))
rotation = float(elements.pop())

View File

@ -5,18 +5,18 @@ def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
return arg
return 0.
return 0.0
def _ntos(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
return ('%.3f' % n).rstrip('0').rstrip('.')
return ("%.3f" % n).rstrip("0").rstrip(".")
def _strip_xml_ns(tag):
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
return tag.split('}', 1)[1] if '}' in tag else tag
return tag.split("}", 1)[1] if "}" in tag else tag
def _transform(raw_value):
@ -24,12 +24,12 @@ def _transform(raw_value):
# No other transform functions are supported at the moment.
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
# start simple: if you aren't exactly matrix(...) then no love
match = re.match(r'matrix\((.*)\)', raw_value)
match = re.match(r"matrix\((.*)\)", raw_value)
if not match:
raise NotImplementedError
matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
if len(matrix) != 6:
raise ValueError('wrong # of terms in %s' % raw_value)
raise ValueError("wrong # of terms in %s" % raw_value)
return matrix
@ -38,81 +38,83 @@ class PathBuilder(object):
self.paths = []
self.transforms = []
def _start_path(self, initial_path=''):
def _start_path(self, initial_path=""):
self.paths.append(initial_path)
self.transforms.append(None)
def _end_path(self):
self._add('z')
self._add("z")
def _add(self, path_snippet):
path = self.paths[-1]
if path:
path += ' ' + path_snippet
path += " " + path_snippet
else:
path = path_snippet
self.paths[-1] = path
def _move(self, c, x, y):
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def M(self, x, y):
self._move('M', x, y)
self._move("M", x, y)
def m(self, x, y):
self._move('m', x, y)
self._move("m", x, y)
def _arc(self, c, rx, ry, x, y, large_arc):
self._add('%s%s,%s 0 %d 1 %s,%s' % (c, _ntos(rx), _ntos(ry), large_arc,
_ntos(x), _ntos(y)))
self._add(
"%s%s,%s 0 %d 1 %s,%s"
% (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
)
def A(self, rx, ry, x, y, large_arc=0):
self._arc('A', rx, ry, x, y, large_arc)
self._arc("A", rx, ry, x, y, large_arc)
def a(self, rx, ry, x, y, large_arc=0):
self._arc('a', rx, ry, x, y, large_arc)
self._arc("a", rx, ry, x, y, large_arc)
def _vhline(self, c, x):
self._add('%s%s' % (c, _ntos(x)))
self._add("%s%s" % (c, _ntos(x)))
def H(self, x):
self._vhline('H', x)
self._vhline("H", x)
def h(self, x):
self._vhline('h', x)
self._vhline("h", x)
def V(self, y):
self._vhline('V', y)
self._vhline("V", y)
def v(self, y):
self._vhline('v', y)
self._vhline("v", y)
def _line(self, c, x, y):
self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
def L(self, x, y):
self._line('L', x, y)
self._line("L", x, y)
def l(self, x, y):
self._line('l', x, y)
self._line("l", x, y)
def _parse_line(self, line):
x1 = float(line.attrib.get('x1', 0))
y1 = float(line.attrib.get('y1', 0))
x2 = float(line.attrib.get('x2', 0))
y2 = float(line.attrib.get('y2', 0))
x1 = float(line.attrib.get("x1", 0))
y1 = float(line.attrib.get("y1", 0))
x2 = float(line.attrib.get("x2", 0))
y2 = float(line.attrib.get("y2", 0))
self._start_path()
self.M(x1, y1)
self.L(x2, y2)
def _parse_rect(self, rect):
x = float(rect.attrib.get('x', 0))
y = float(rect.attrib.get('y', 0))
w = float(rect.attrib.get('width'))
h = float(rect.attrib.get('height'))
rx = float(rect.attrib.get('rx', 0))
ry = float(rect.attrib.get('ry', 0))
x = float(rect.attrib.get("x", 0))
y = float(rect.attrib.get("y", 0))
w = float(rect.attrib.get("width"))
h = float(rect.attrib.get("height"))
rx = float(rect.attrib.get("rx", 0))
ry = float(rect.attrib.get("ry", 0))
rx = _prefer_non_zero(rx, ry)
ry = _prefer_non_zero(ry, rx)
@ -135,22 +137,22 @@ class PathBuilder(object):
self._end_path()
def _parse_path(self, path):
if 'd' in path.attrib:
self._start_path(initial_path=path.attrib['d'])
if "d" in path.attrib:
self._start_path(initial_path=path.attrib["d"])
def _parse_polygon(self, poly):
if 'points' in poly.attrib:
self._start_path('M' + poly.attrib['points'])
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
self._end_path()
def _parse_polyline(self, poly):
if 'points' in poly.attrib:
self._start_path('M' + poly.attrib['points'])
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
def _parse_circle(self, circle):
cx = float(circle.attrib.get('cx', 0))
cy = float(circle.attrib.get('cy', 0))
r = float(circle.attrib.get('r'))
cx = float(circle.attrib.get("cx", 0))
cy = float(circle.attrib.get("cy", 0))
r = float(circle.attrib.get("r"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@ -159,10 +161,10 @@ class PathBuilder(object):
self.A(r, r, cx - r, cy, large_arc=1)
def _parse_ellipse(self, ellipse):
cx = float(ellipse.attrib.get('cx', 0))
cy = float(ellipse.attrib.get('cy', 0))
rx = float(ellipse.attrib.get('rx'))
ry = float(ellipse.attrib.get('ry'))
cx = float(ellipse.attrib.get("cx", 0))
cy = float(ellipse.attrib.get("cy", 0))
rx = float(ellipse.attrib.get("rx"))
ry = float(ellipse.attrib.get("ry"))
# arc doesn't seem to like being a complete shape, draw two halves
self._start_path()
@ -172,10 +174,10 @@ class PathBuilder(object):
def add_path_from_element(self, el):
tag = _strip_xml_ns(el.tag)
parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
if not callable(parse_fn):
return False
parse_fn(el)
if 'transform' in el.attrib:
self.transforms[-1] = _transform(el.attrib['transform'])
if "transform" in el.attrib:
self.transforms[-1] = _transform(el.attrib["transform"])
return True

File diff suppressed because it is too large Load Diff

View File

@ -7,53 +7,61 @@ import sys
log = logging.getLogger(__name__)
class TTLibError(Exception): pass
class TTLibFileIsCollectionError (TTLibError): pass
class TTLibError(Exception):
pass
class TTLibFileIsCollectionError(TTLibError):
pass
@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
import time
print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
import time
print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())))
from fontTools.ttLib.ttFont import *
from fontTools.ttLib.ttCollection import TTCollection
def main(args=None):
"""Open/save fonts with TTFont() or TTCollection()
"""Open/save fonts with TTFont() or TTCollection()
./fonttools ttLib [-oFILE] [-yNUMBER] files...
./fonttools ttLib [-oFILE] [-yNUMBER] files...
If multiple files are given on the command-line,
they are each opened (as a font or collection),
and added to the font list.
If multiple files are given on the command-line,
they are each opened (as a font or collection),
and added to the font list.
If -o (output-file) argument is given, the font
list is then saved to the output file, either as
a single font, if there is only one font, or as
a collection otherwise.
If -o (output-file) argument is given, the font
list is then saved to the output file, either as
a single font, if there is only one font, or as
a collection otherwise.
If -y (font-number) argument is given, only the
specified font from collections is opened.
If -y (font-number) argument is given, only the
specified font from collections is opened.
The above allow extracting a single font from a
collection, or combining multiple fonts into a
collection.
The above allow extracting a single font from a
collection, or combining multiple fonts into a
collection.
If --lazy or --no-lazy are give, those are passed
to the TTFont() or TTCollection() constructors.
"""
from fontTools import configLogger
If --lazy or --no-lazy are give, those are passed
to the TTFont() or TTCollection() constructors.
"""
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
if args is None:
args = sys.argv[1:]
import argparse
import argparse
parser = argparse.ArgumentParser(
"fonttools ttLib",
description="Open/save fonts with TTFont() or TTCollection()",
epilog="""
parser = argparse.ArgumentParser(
"fonttools ttLib",
description="Open/save fonts with TTFont() or TTCollection()",
epilog="""
If multiple files are given on the command-line,
they are each opened (as a font or collection),
and added to the font list.
@ -62,43 +70,44 @@ def main(args=None):
allows for extracting a single font from a
collection, or combining multiple fonts into a
collection.
"""
)
parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
parser.add_argument(
"-o", "--output", metavar="FILE", default=None, help="Output file."
)
parser.add_argument(
"-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
)
parser.add_argument(
"--lazy", action="store_true", default=None, help="Load fonts lazily."
)
parser.add_argument(
"--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
)
options = parser.parse_args(args)
""",
)
parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
parser.add_argument(
"-o", "--output", metavar="FILE", default=None, help="Output file."
)
parser.add_argument(
"-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
)
parser.add_argument(
"--lazy", action="store_true", default=None, help="Load fonts lazily."
)
parser.add_argument(
"--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
)
options = parser.parse_args(args)
fontNumber = int(options.y) if options.y is not None else None
outFile = options.output
lazy = options.lazy
fontNumber = int(options.y) if options.y is not None else None
outFile = options.output
lazy = options.lazy
fonts = []
for f in options.font:
try:
font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
fonts.append(font)
except TTLibFileIsCollectionError:
collection = TTCollection(f, lazy=lazy)
fonts.extend(collection.fonts)
fonts = []
for f in options.font:
try:
font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
fonts.append(font)
except TTLibFileIsCollectionError:
collection = TTCollection(f, lazy=lazy)
fonts.extend(collection.fonts)
if outFile is not None:
if len(fonts) == 1:
fonts[0].save(outFile)
else:
collection = TTCollection()
collection.fonts = fonts
collection.save(outFile)
if outFile is not None:
if len(fonts) == 1:
fonts[0].save(outFile)
else:
collection = TTCollection()
collection.fonts = fonts
collection.save(outFile)
if __name__ == "__main__":
sys.exit(main())
sys.exit(main())

View File

@ -4,49 +4,51 @@ from fontTools.misc.macRes import ResourceReader, ResourceError
def getSFNTResIndices(path):
"""Determine whether a file has a 'sfnt' resource fork or not."""
try:
reader = ResourceReader(path)
indices = reader.getIndices('sfnt')
reader.close()
return indices
except ResourceError:
return []
"""Determine whether a file has a 'sfnt' resource fork or not."""
try:
reader = ResourceReader(path)
indices = reader.getIndices("sfnt")
reader.close()
return indices
except ResourceError:
return []
def openTTFonts(path):
"""Given a pathname, return a list of TTFont objects. In the case
of a flat TTF/OTF file, the list will contain just one font object;
but in the case of a Mac font suitcase it will contain as many
font objects as there are sfnt resources in the file.
"""
from fontTools import ttLib
fonts = []
sfnts = getSFNTResIndices(path)
if not sfnts:
fonts.append(ttLib.TTFont(path))
else:
for index in sfnts:
fonts.append(ttLib.TTFont(path, index))
if not fonts:
raise ttLib.TTLibError("no fonts found in file '%s'" % path)
return fonts
"""Given a pathname, return a list of TTFont objects. In the case
of a flat TTF/OTF file, the list will contain just one font object;
but in the case of a Mac font suitcase it will contain as many
font objects as there are sfnt resources in the file.
"""
from fontTools import ttLib
fonts = []
sfnts = getSFNTResIndices(path)
if not sfnts:
fonts.append(ttLib.TTFont(path))
else:
for index in sfnts:
fonts.append(ttLib.TTFont(path, index))
if not fonts:
raise ttLib.TTLibError("no fonts found in file '%s'" % path)
return fonts
class SFNTResourceReader(BytesIO):
"""Simple read-only file wrapper for 'sfnt' resources."""
"""Simple read-only file wrapper for 'sfnt' resources."""
def __init__(self, path, res_name_or_index):
from fontTools import ttLib
reader = ResourceReader(path)
if isinstance(res_name_or_index, str):
rsrc = reader.getNamedResource('sfnt', res_name_or_index)
else:
rsrc = reader.getIndResource('sfnt', res_name_or_index)
if rsrc is None:
raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
reader.close()
self.rsrc = rsrc
super(SFNTResourceReader, self).__init__(rsrc.data)
self.name = path
def __init__(self, path, res_name_or_index):
from fontTools import ttLib
reader = ResourceReader(path)
if isinstance(res_name_or_index, str):
rsrc = reader.getNamedResource("sfnt", res_name_or_index)
else:
rsrc = reader.getIndResource("sfnt", res_name_or_index)
if rsrc is None:
raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
reader.close()
self.rsrc = rsrc
super(SFNTResourceReader, self).__init__(rsrc.data)
self.name = path

File diff suppressed because it is too large Load Diff

View File

@ -10,262 +10,262 @@
#
standardGlyphOrder = [
".notdef", # 0
".null", # 1
"nonmarkingreturn", # 2
"space", # 3
"exclam", # 4
"quotedbl", # 5
"numbersign", # 6
"dollar", # 7
"percent", # 8
"ampersand", # 9
"quotesingle", # 10
"parenleft", # 11
"parenright", # 12
"asterisk", # 13
"plus", # 14
"comma", # 15
"hyphen", # 16
"period", # 17
"slash", # 18
"zero", # 19
"one", # 20
"two", # 21
"three", # 22
"four", # 23
"five", # 24
"six", # 25
"seven", # 26
"eight", # 27
"nine", # 28
"colon", # 29
"semicolon", # 30
"less", # 31
"equal", # 32
"greater", # 33
"question", # 34
"at", # 35
"A", # 36
"B", # 37
"C", # 38
"D", # 39
"E", # 40
"F", # 41
"G", # 42
"H", # 43
"I", # 44
"J", # 45
"K", # 46
"L", # 47
"M", # 48
"N", # 49
"O", # 50
"P", # 51
"Q", # 52
"R", # 53
"S", # 54
"T", # 55
"U", # 56
"V", # 57
"W", # 58
"X", # 59
"Y", # 60
"Z", # 61
"bracketleft", # 62
"backslash", # 63
"bracketright", # 64
"asciicircum", # 65
"underscore", # 66
"grave", # 67
"a", # 68
"b", # 69
"c", # 70
"d", # 71
"e", # 72
"f", # 73
"g", # 74
"h", # 75
"i", # 76
"j", # 77
"k", # 78
"l", # 79
"m", # 80
"n", # 81
"o", # 82
"p", # 83
"q", # 84
"r", # 85
"s", # 86
"t", # 87
"u", # 88
"v", # 89
"w", # 90
"x", # 91
"y", # 92
"z", # 93
"braceleft", # 94
"bar", # 95
"braceright", # 96
"asciitilde", # 97
"Adieresis", # 98
"Aring", # 99
"Ccedilla", # 100
"Eacute", # 101
"Ntilde", # 102
"Odieresis", # 103
"Udieresis", # 104
"aacute", # 105
"agrave", # 106
"acircumflex", # 107
"adieresis", # 108
"atilde", # 109
"aring", # 110
"ccedilla", # 111
"eacute", # 112
"egrave", # 113
"ecircumflex", # 114
"edieresis", # 115
"iacute", # 116
"igrave", # 117
"icircumflex", # 118
"idieresis", # 119
"ntilde", # 120
"oacute", # 121
"ograve", # 122
"ocircumflex", # 123
"odieresis", # 124
"otilde", # 125
"uacute", # 126
"ugrave", # 127
"ucircumflex", # 128
"udieresis", # 129
"dagger", # 130
"degree", # 131
"cent", # 132
"sterling", # 133
"section", # 134
"bullet", # 135
"paragraph", # 136
"germandbls", # 137
"registered", # 138
"copyright", # 139
"trademark", # 140
"acute", # 141
"dieresis", # 142
"notequal", # 143
"AE", # 144
"Oslash", # 145
"infinity", # 146
"plusminus", # 147
"lessequal", # 148
"greaterequal", # 149
"yen", # 150
"mu", # 151
"partialdiff", # 152
"summation", # 153
"product", # 154
"pi", # 155
"integral", # 156
"ordfeminine", # 157
"ordmasculine", # 158
"Omega", # 159
"ae", # 160
"oslash", # 161
"questiondown", # 162
"exclamdown", # 163
"logicalnot", # 164
"radical", # 165
"florin", # 166
"approxequal", # 167
"Delta", # 168
"guillemotleft", # 169
"guillemotright", # 170
"ellipsis", # 171
"nonbreakingspace", # 172
"Agrave", # 173
"Atilde", # 174
"Otilde", # 175
"OE", # 176
"oe", # 177
"endash", # 178
"emdash", # 179
"quotedblleft", # 180
"quotedblright", # 181
"quoteleft", # 182
"quoteright", # 183
"divide", # 184
"lozenge", # 185
"ydieresis", # 186
"Ydieresis", # 187
"fraction", # 188
"currency", # 189
"guilsinglleft", # 190
"guilsinglright", # 191
"fi", # 192
"fl", # 193
"daggerdbl", # 194
"periodcentered", # 195
"quotesinglbase", # 196
"quotedblbase", # 197
"perthousand", # 198
"Acircumflex", # 199
"Ecircumflex", # 200
"Aacute", # 201
"Edieresis", # 202
"Egrave", # 203
"Iacute", # 204
"Icircumflex", # 205
"Idieresis", # 206
"Igrave", # 207
"Oacute", # 208
"Ocircumflex", # 209
"apple", # 210
"Ograve", # 211
"Uacute", # 212
"Ucircumflex", # 213
"Ugrave", # 214
"dotlessi", # 215
"circumflex", # 216
"tilde", # 217
"macron", # 218
"breve", # 219
"dotaccent", # 220
"ring", # 221
"cedilla", # 222
"hungarumlaut", # 223
"ogonek", # 224
"caron", # 225
"Lslash", # 226
"lslash", # 227
"Scaron", # 228
"scaron", # 229
"Zcaron", # 230
"zcaron", # 231
"brokenbar", # 232
"Eth", # 233
"eth", # 234
"Yacute", # 235
"yacute", # 236
"Thorn", # 237
"thorn", # 238
"minus", # 239
"multiply", # 240
"onesuperior", # 241
"twosuperior", # 242
"threesuperior", # 243
"onehalf", # 244
"onequarter", # 245
"threequarters", # 246
"franc", # 247
"Gbreve", # 248
"gbreve", # 249
"Idotaccent", # 250
"Scedilla", # 251
"scedilla", # 252
"Cacute", # 253
"cacute", # 254
"Ccaron", # 255
"ccaron", # 256
"dcroat" # 257
".notdef", # 0
".null", # 1
"nonmarkingreturn", # 2
"space", # 3
"exclam", # 4
"quotedbl", # 5
"numbersign", # 6
"dollar", # 7
"percent", # 8
"ampersand", # 9
"quotesingle", # 10
"parenleft", # 11
"parenright", # 12
"asterisk", # 13
"plus", # 14
"comma", # 15
"hyphen", # 16
"period", # 17
"slash", # 18
"zero", # 19
"one", # 20
"two", # 21
"three", # 22
"four", # 23
"five", # 24
"six", # 25
"seven", # 26
"eight", # 27
"nine", # 28
"colon", # 29
"semicolon", # 30
"less", # 31
"equal", # 32
"greater", # 33
"question", # 34
"at", # 35
"A", # 36
"B", # 37
"C", # 38
"D", # 39
"E", # 40
"F", # 41
"G", # 42
"H", # 43
"I", # 44
"J", # 45
"K", # 46
"L", # 47
"M", # 48
"N", # 49
"O", # 50
"P", # 51
"Q", # 52
"R", # 53
"S", # 54
"T", # 55
"U", # 56
"V", # 57
"W", # 58
"X", # 59
"Y", # 60
"Z", # 61
"bracketleft", # 62
"backslash", # 63
"bracketright", # 64
"asciicircum", # 65
"underscore", # 66
"grave", # 67
"a", # 68
"b", # 69
"c", # 70
"d", # 71
"e", # 72
"f", # 73
"g", # 74
"h", # 75
"i", # 76
"j", # 77
"k", # 78
"l", # 79
"m", # 80
"n", # 81
"o", # 82
"p", # 83
"q", # 84
"r", # 85
"s", # 86
"t", # 87
"u", # 88
"v", # 89
"w", # 90
"x", # 91
"y", # 92
"z", # 93
"braceleft", # 94
"bar", # 95
"braceright", # 96
"asciitilde", # 97
"Adieresis", # 98
"Aring", # 99
"Ccedilla", # 100
"Eacute", # 101
"Ntilde", # 102
"Odieresis", # 103
"Udieresis", # 104
"aacute", # 105
"agrave", # 106
"acircumflex", # 107
"adieresis", # 108
"atilde", # 109
"aring", # 110
"ccedilla", # 111
"eacute", # 112
"egrave", # 113
"ecircumflex", # 114
"edieresis", # 115
"iacute", # 116
"igrave", # 117
"icircumflex", # 118
"idieresis", # 119
"ntilde", # 120
"oacute", # 121
"ograve", # 122
"ocircumflex", # 123
"odieresis", # 124
"otilde", # 125
"uacute", # 126
"ugrave", # 127
"ucircumflex", # 128
"udieresis", # 129
"dagger", # 130
"degree", # 131
"cent", # 132
"sterling", # 133
"section", # 134
"bullet", # 135
"paragraph", # 136
"germandbls", # 137
"registered", # 138
"copyright", # 139
"trademark", # 140
"acute", # 141
"dieresis", # 142
"notequal", # 143
"AE", # 144
"Oslash", # 145
"infinity", # 146
"plusminus", # 147
"lessequal", # 148
"greaterequal", # 149
"yen", # 150
"mu", # 151
"partialdiff", # 152
"summation", # 153
"product", # 154
"pi", # 155
"integral", # 156
"ordfeminine", # 157
"ordmasculine", # 158
"Omega", # 159
"ae", # 160
"oslash", # 161
"questiondown", # 162
"exclamdown", # 163
"logicalnot", # 164
"radical", # 165
"florin", # 166
"approxequal", # 167
"Delta", # 168
"guillemotleft", # 169
"guillemotright", # 170
"ellipsis", # 171
"nonbreakingspace", # 172
"Agrave", # 173
"Atilde", # 174
"Otilde", # 175
"OE", # 176
"oe", # 177
"endash", # 178
"emdash", # 179
"quotedblleft", # 180
"quotedblright", # 181
"quoteleft", # 182
"quoteright", # 183
"divide", # 184
"lozenge", # 185
"ydieresis", # 186
"Ydieresis", # 187
"fraction", # 188
"currency", # 189
"guilsinglleft", # 190
"guilsinglright", # 191
"fi", # 192
"fl", # 193
"daggerdbl", # 194
"periodcentered", # 195
"quotesinglbase", # 196
"quotedblbase", # 197
"perthousand", # 198
"Acircumflex", # 199
"Ecircumflex", # 200
"Aacute", # 201
"Edieresis", # 202
"Egrave", # 203
"Iacute", # 204
"Icircumflex", # 205
"Idieresis", # 206
"Igrave", # 207
"Oacute", # 208
"Ocircumflex", # 209
"apple", # 210
"Ograve", # 211
"Uacute", # 212
"Ucircumflex", # 213
"Ugrave", # 214
"dotlessi", # 215
"circumflex", # 216
"tilde", # 217
"macron", # 218
"breve", # 219
"dotaccent", # 220
"ring", # 221
"cedilla", # 222
"hungarumlaut", # 223
"ogonek", # 224
"caron", # 225
"Lslash", # 226
"lslash", # 227
"Scaron", # 228
"scaron", # 229
"Zcaron", # 230
"zcaron", # 231
"brokenbar", # 232
"Eth", # 233
"eth", # 234
"Yacute", # 235
"yacute", # 236
"Thorn", # 237
"thorn", # 238
"minus", # 239
"multiply", # 240
"onesuperior", # 241
"twosuperior", # 242
"threesuperior", # 243
"onehalf", # 244
"onequarter", # 245
"threequarters", # 246
"franc", # 247
"Gbreve", # 248
"gbreve", # 249
"Idotaccent", # 250
"Scedilla", # 251
"scedilla", # 252
"Cacute", # 253
"cacute", # 254
"Ccaron", # 255
"ccaron", # 256
"dcroat", # 257
]

View File

@ -2,4 +2,4 @@ from .otBase import BaseTTXConverter
class table_B_A_S_E_(BaseTTXConverter):
pass
pass

View File

@ -28,32 +28,37 @@ smallGlyphMetricsFormat = """
Advance: B
"""
class BitmapGlyphMetrics(object):
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__)
writer.newline()
for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__)
writer.newline()
for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
# Make sure this is a metric that is needed by GlyphMetrics.
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
else:
log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__)
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
# Make sure this is a metric that is needed by GlyphMetrics.
if name in metricNames:
vars(self)[name] = safeEval(attrs["value"])
else:
log.warning(
"unknown name '%s' being ignored in %s.",
name,
self.__class__.__name__,
)
class BigGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = bigGlyphMetricsFormat
binaryFormat = bigGlyphMetricsFormat
class SmallGlyphMetrics(BitmapGlyphMetrics):
binaryFormat = smallGlyphMetricsFormat
binaryFormat = smallGlyphMetricsFormat

View File

@ -6,87 +6,100 @@
from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from .E_B_D_T_ import (
BitmapGlyph,
BitmapPlusSmallMetricsMixin,
BitmapPlusBigMetricsMixin,
)
import struct
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
# Change the data locator table being referenced.
locatorName = 'CBLC'
# Change the data locator table being referenced.
locatorName = "CBLC"
# Modify the format class accessor for color bitmap use.
def getImageFormatClass(self, imageFormat):
try:
return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
except KeyError:
return cbdt_bitmap_classes[imageFormat]
# Modify the format class accessor for color bitmap use.
def getImageFormatClass(self, imageFormat):
try:
return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
except KeyError:
return cbdt_bitmap_classes[imageFormat]
# Helper method for removing export features not supported by color bitmaps.
# Write data in the parent class will default to raw if an option is unsupported.
def _removeUnsupportedForColor(dataFunctions):
dataFunctions = dict(dataFunctions)
del dataFunctions['row']
return dataFunctions
dataFunctions = dict(dataFunctions)
del dataFunctions["row"]
return dataFunctions
class ColorBitmapGlyph(BitmapGlyph):
fileExtension = '.png'
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
fileExtension = ".png"
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = SmallGlyphMetrics()
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
def decompile(self):
self.metrics = SmallGlyphMetrics()
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overun in format 17"
self.imageData = data[:dataLen]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overun in format 17"
self.imageData = data[:dataLen]
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = BigGlyphMetrics()
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
def decompile(self):
self.metrics = BigGlyphMetrics()
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overun in format 18"
self.imageData = data[:dataLen]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overun in format 18"
self.imageData = data[:dataLen]
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_19(ColorBitmapGlyph):
def decompile(self):
(dataLen,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
def decompile(self):
(dataLen,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
assert dataLen <= len(data), "Data overun in format 19"
self.imageData = data[:dataLen]
assert dataLen <= len(data), "Data overun in format 19"
self.imageData = data[:dataLen]
def compile(self, ttFont):
return struct.pack(">L", len(self.imageData)) + self.imageData
def compile(self, ttFont):
return struct.pack(">L", len(self.imageData)) + self.imageData
# Dict for CBDT extended formats.
cbdt_bitmap_classes = {
17: cbdt_bitmap_format_17,
18: cbdt_bitmap_format_18,
19: cbdt_bitmap_format_19,
17: cbdt_bitmap_format_17,
18: cbdt_bitmap_format_18,
19: cbdt_bitmap_format_19,
}

View File

@ -4,6 +4,7 @@
from . import E_B_L_C_
class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
dependencies = ['CBDT']
dependencies = ["CBDT"]

View File

@ -4,43 +4,43 @@ from . import DefaultTable
class table_C_F_F_(DefaultTable.DefaultTable):
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.cff = cffLib.CFFFontSet()
self._gaveGlyphOrder = False
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.cff = cffLib.CFFFontSet()
self._gaveGlyphOrder = False
def decompile(self, data, otFont):
self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
def decompile(self, data, otFont):
self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
def compile(self, otFont):
f = BytesIO()
self.cff.compile(f, otFont, isCFF2=False)
return f.getvalue()
def compile(self, otFont):
f = BytesIO()
self.cff.compile(f, otFont, isCFF2=False)
return f.getvalue()
def haveGlyphNames(self):
if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
return False # CID-keyed font
else:
return True
def haveGlyphNames(self):
if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
return False # CID-keyed font
else:
return True
def getGlyphOrder(self):
if self._gaveGlyphOrder:
from fontTools import ttLib
def getGlyphOrder(self):
if self._gaveGlyphOrder:
from fontTools import ttLib
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
self._gaveGlyphOrder = True
return self.cff[self.cff.fontNames[0]].getGlyphOrder()
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
self._gaveGlyphOrder = True
return self.cff[self.cff.fontNames[0]].getGlyphOrder()
def setGlyphOrder(self, glyphOrder):
pass
# XXX
#self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
def setGlyphOrder(self, glyphOrder):
pass
# XXX
# self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
def toXML(self, writer, otFont):
self.cff.toXML(writer)
def toXML(self, writer, otFont):
self.cff.toXML(writer)
def fromXML(self, name, attrs, content, otFont):
if not hasattr(self, "cff"):
self.cff = cffLib.CFFFontSet()
self.cff.fromXML(name, attrs, content, otFont)
def fromXML(self, name, attrs, content, otFont):
if not hasattr(self, "cff"):
self.cff = cffLib.CFFFontSet()
self.cff.fromXML(name, attrs, content, otFont)

Some files were not shown because too many files have changed in this diff Show More