Merge branch 'main' into avar2

Behdad Esfahbod 2023-03-07 11:21:20 -07:00
commit fd822a2602
434 changed files with 78131 additions and 62625 deletions

.git-blame-ignore-revs (new file)

@@ -0,0 +1,2 @@
+# First blackening of code
+d584daa8fdc71030f92ee665472d6c7cddd49283


@@ -9,6 +9,10 @@ on:
 permissions:
   contents: read
 
+env:
+  # turns off tox's output redirection so we can debug package installation
+  TOX_OPTIONS: -vv
+
 jobs:
   lint:
     runs-on: ubuntu-latest
@@ -23,20 +27,20 @@ jobs:
       - name: Install packages
         run: pip install tox
       - name: Run Tox
-        run: tox -e mypy,package_readme
+        run: tox $TOX_OPTIONS -e lint,package_readme
 
   test:
     runs-on: ${{ matrix.platform }}
     if: "! contains(toJSON(github.event.commits.*.message), '[skip ci]')"
     strategy:
       matrix:
-        python-version: ["3.7", "3.10"]
+        python-version: ["3.8", "3.10"]
         platform: [ubuntu-latest, macos-latest, windows-latest]
         exclude: # Only test on the latest supported stable Python on macOS and Windows.
           - platform: macos-latest
-            python-version: 3.7
+            python-version: 3.8
           - platform: windows-latest
-            python-version: 3.7
+            python-version: 3.8
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
@@ -46,9 +50,9 @@ jobs:
       - name: Install packages
         run: pip install tox coverage
       - name: Run Tox
-        run: tox -e py-cov
+        run: tox $TOX_OPTIONS -e py-cov
       - name: Run Tox without lxml
-        run: tox -e py-cov-nolxml
+        run: tox $TOX_OPTIONS -e py-cov-nolxml
       - name: Produce coverage files
         run: |
           coverage combine
@@ -71,11 +75,11 @@ jobs:
       - name: Set up Python 3.x
         uses: actions/setup-python@v4
         with:
-          python-version: "3.10"
+          python-version: "3.11"
       - name: Install packages
         run: pip install tox
       - name: Run Tox
-        run: tox -e py-cy-nolxml
+        run: tox $TOX_OPTIONS -e py-cy-nolxml
 
   test-pypy3:
     runs-on: ubuntu-latest
@@ -85,8 +89,8 @@ jobs:
      - name: Set up Python pypy3
        uses: actions/setup-python@v4
        with:
-         python-version: "pypy-3.7"
+         python-version: "pypy-3.8"
      - name: Install packages
        run: pip install tox
      - name: Run Tox
-        run: tox -e pypy3-nolxml
+        run: tox $TOX_OPTIONS -e pypy3-nolxml


@@ -1,4 +1,4 @@
 sphinx==5.3.0
-sphinx_rtd_theme==1.0.0
+sphinx_rtd_theme==1.1.1
-reportlab==3.6.11
+reportlab==3.6.12
 freetype-py==2.3.0


@@ -30,14 +30,17 @@ needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinx.ext.coverage",
    "sphinx.ext.autosectionlabel",
]

autodoc_mock_imports = ["gtk", "reportlab"]

autodoc_default_options = {"members": True, "inherited-members": True}

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -52,9 +55,11 @@ source_suffix = ".rst"
master_doc = "index"

# General information about the project.
project = "fontTools"
copyright = (
    "2020, Just van Rossum, Behdad Esfahbod, and the fontTools Authors. CC BY-SA 4.0"
)
author = "Just van Rossum, Behdad Esfahbod, and the fontTools Authors"

# HTML page title
html_title = "fontTools Documentation"
@@ -64,9 +69,9 @@ html_title = "fontTools Documentation"
# built documents.
#
# The short X.Y version.
version = "4.0"
# The full version, including alpha/beta/rc tags.
release = "4.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -142,8 +147,8 @@ latex_documents = [
    (
        master_doc,
        "fontTools.tex",
        "fontTools Documentation",
        "Just van Rossum, Behdad Esfahbod et al.",
        "manual",
    )
]
@@ -153,7 +158,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "fonttools", "fontTools Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------
@@ -165,7 +170,7 @@ texinfo_documents = [
    (
        master_doc,
        "fontTools",
        "fontTools Documentation",
        author,
        "fontTools",
        "A library for manipulating fonts, written in Python.",


@@ -187,10 +187,10 @@ for more information.
 .. automodule:: fontTools.designspaceLib.split
 
-fontTools.designspaceLib.stat
+fontTools.varLib.stat
 =============================
 
-.. automodule:: fontTools.designspaceLib.stat
+.. automodule:: fontTools.varLib.stat
 
 fontTools.designspaceLib.statNames


@@ -542,13 +542,13 @@ element with an ``xml:lang`` attribute:
 Defines the coordinates of this source in the design space.
 
-.. seealso:: `Full documentation of the <location> element <location>`__
+.. seealso:: :ref:`Full documentation of the \<location\> element <location>`
 
 ``<dimension>`` element (source)
 ................................
 
-.. seealso:: `Full documentation of the <dimension> element <dimension>`__
+.. seealso:: :ref:`Full documentation of the \<dimension\> element <dimension>`
 
 ``<lib>`` element (source)
@@ -836,13 +836,13 @@ The ``<instances>`` element contains one or more ``<instance>`` elements.
 Defines the coordinates of this instance in the design space.
 
-.. seealso:: `Full documentation of the <location> element <location>`__
+.. seealso:: :ref:`Full documentation of the \<location\> element <location>`
 
 ``<dimension>`` element (instance)
 ..................................
 
-.. seealso:: `Full documentation of the <dimension> element <dimension>`__
+.. seealso:: :ref:`Full documentation of the \<dimension\> element <dimension>`
 
 ``<lib>`` element (instance)


@@ -101,13 +101,13 @@ Paul Wise.
 License
 -------
 
-`MIT license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. See the full text of the license for details.
+`MIT license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. See the full text of the license for details.
 
 .. |Travis Build Status| image:: https://travis-ci.org/fonttools/fonttools.svg
    :target: https://travis-ci.org/fonttools/fonttools
 .. |Appveyor Build status| image:: https://ci.appveyor.com/api/projects/status/0f7fmee9as744sl7/branch/master?svg=true
    :target: https://ci.appveyor.com/project/fonttools/fonttools/branch/master
-.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
    :target: https://codecov.io/gh/fonttools/fonttools
 .. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
    :target: https://pypi.org/project/FontTools


@@ -13,7 +13,7 @@ About
 fontTools is a family of libraries and utilities for manipulating fonts in Python.
 
-The project has an `MIT open-source license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. Among other things this means you can use it free of charge.
+The project has an `MIT open-source license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. Among other things this means you can use it free of charge.
 
 Installation
 ------------
@@ -88,7 +88,7 @@ libraries in the fontTools suite:
 - :py:mod:`fontTools.varLib`: Module for dealing with 'gvar'-style font variations
 - :py:mod:`fontTools.voltLib`: Module for dealing with Visual OpenType Layout Tool (VOLT) files
 
-A selection of sample Python programs using these libaries can be found in the `Snippets directory <https://github.com/fonttools/fonttools/blob/master/Snippets/>`_ of the fontTools repository.
+A selection of sample Python programs using these libaries can be found in the `Snippets directory <https://github.com/fonttools/fonttools/blob/main/Snippets/>`_ of the fontTools repository.
 
 Optional Dependencies
 ---------------------
@@ -107,7 +107,7 @@ Information for developers can be found :doc:`here <./developer>`.
 License
 -------
 
-`MIT license <https://github.com/fonttools/fonttools/blob/master/LICENSE>`_. See the full text of the license for details.
+`MIT license <https://github.com/fonttools/fonttools/blob/main/LICENSE>`_. See the full text of the license for details.
 
 Table of Contents
@@ -148,7 +148,7 @@ Table of Contents
    :target: https://travis-ci.org/fonttools/fonttools
 .. |Appveyor Build status| image:: https://ci.appveyor.com/api/projects/status/0f7fmee9as744sl7/branch/master?svg=true
    :target: https://ci.appveyor.com/project/fonttools/fonttools/branch/master
-.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
    :target: https://codecov.io/gh/fonttools/fonttools
 .. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
    :target: https://pypi.org/project/FontTools


@@ -3,6 +3,6 @@ from fontTools.misc.loggingTools import configLogger
 
 log = logging.getLogger(__name__)
 
-version = __version__ = "4.38.1.dev0"
+version = __version__ = "4.39.1.dev0"
 
 __all__ = ["version", "log", "configLogger"]


@@ -2,33 +2,34 @@ import sys


def main(args=None):
    if args is None:
        args = sys.argv[1:]

    # TODO Handle library-wide options. Eg.:
    # --unicodedata
    # --verbose / other logging stuff

    # TODO Allow a way to run arbitrary modules? Useful for setting
    # library-wide options and calling another library. Eg.:
    #
    #   $ fonttools --unicodedata=... fontmake ...
    #
    # This allows for a git-like command where thirdparty commands
    # can be added. Should we just try importing the fonttools
    # module first and try without if it fails?

    if len(sys.argv) < 2:
        sys.argv.append("help")
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        sys.argv[1] = "help"
    mod = "fontTools." + sys.argv[1]
    sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
    del sys.argv[0]

    import runpy

    runpy.run_module(mod, run_name="__main__")


if __name__ == "__main__":
    sys.exit(main())
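For context on the dispatcher above: the first CLI argument names a fontTools submodule, which is then executed as a script via runpy. A minimal sketch of that pattern, not part of this commit (the dispatch helper name is made up):

import runpy
import sys


def dispatch(argv):
    # Mirrors the logic in fontTools/__main__.py: the first argument picks a
    # fontTools submodule, so "fonttools ttx font.ttf" effectively runs
    # "python -m fontTools.ttx font.ttf".
    subcommand = argv[1] if len(argv) > 1 else "help"
    if subcommand in ("-h", "--help"):
        subcommand = "help"
    runpy.run_module("fontTools." + subcommand, run_name="__main__")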


@@ -53,378 +53,386 @@ identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
    r"(-?\d+)"  # charnum
    r"\s*;\s*WX\s+"  # ; WX
    r"(-?\d+)"  # width
    r"\s*;\s*N\s+"  # ; N
    r"([.A-Za-z0-9_]+)"  # charname
    r"\s*;\s*B\s+"  # ; B
    r"(-?\d+)"  # left
    r"\s+"
    r"(-?\d+)"  # bottom
    r"\s+"
    r"(-?\d+)"  # right
    r"\s+"
    r"(-?\d+)"  # top
    r"\s*;\s*"  # ;
)

# regular expression to parse kerning lines
kernRE = re.compile(
    r"([.A-Za-z0-9_]+)"  # leftchar
    r"\s+"
    r"([.A-Za-z0-9_]+)"  # rightchar
    r"\s+"
    r"(-?\d+)"  # value
    r"\s*"
)

# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
    r"([.A-Za-z0-9_]+)" r"\s+" r"(\d+)" r"\s*;\s*"  # char name  # number of parts
)
componentRE = re.compile(
    r"PCC\s+"  # PPC
    r"([.A-Za-z0-9_]+)"  # base char name
    r"\s+"
    r"(-?\d+)"  # x offset
    r"\s+"
    r"(-?\d+)"  # y offset
    r"\s*;\s*"
)

preferredAttributeOrder = [
    "FontName",
    "FullName",
    "FamilyName",
    "Weight",
    "ItalicAngle",
    "IsFixedPitch",
    "FontBBox",
    "UnderlinePosition",
    "UnderlineThickness",
    "Version",
    "Notice",
    "EncodingScheme",
    "CapHeight",
    "XHeight",
    "Ascender",
    "Descender",
]


class error(Exception):
    pass


class AFM(object):
    _attrs = None

    _keywords = [
        "StartFontMetrics",
        "EndFontMetrics",
        "StartCharMetrics",
        "EndCharMetrics",
        "StartKernData",
        "StartKernPairs",
        "EndKernPairs",
        "EndKernData",
        "StartComposites",
        "EndComposites",
    ]

    def __init__(self, path=None):
        """AFM file reader.

        Instantiating an object with a path name will cause the file to be opened,
        read, and parsed. Alternatively the path can be left unspecified, and a
        file can be parsed later with the :meth:`read` method."""
        self._attrs = {}
        self._chars = {}
        self._kerning = {}
        self._index = {}
        self._comments = []
        self._composites = {}
        if path is not None:
            self.read(path)

    def read(self, path):
        """Opens, reads and parses a file."""
        lines = readlines(path)
        for line in lines:
            if not line.strip():
                continue
            m = identifierRE.match(line)
            if m is None:
                raise error("syntax error in AFM file: " + repr(line))

            pos = m.regs[1][1]
            word = line[:pos]
            rest = line[pos:].strip()
            if word in self._keywords:
                continue
            if word == "C":
                self.parsechar(rest)
            elif word == "KPX":
                self.parsekernpair(rest)
            elif word == "CC":
                self.parsecomposite(rest)
            else:
                self.parseattr(word, rest)

    def parsechar(self, rest):
        m = charRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        charname = things[2]
        del things[2]
        charnum, width, l, b, r, t = (int(thing) for thing in things)
        self._chars[charname] = charnum, width, (l, b, r, t)

    def parsekernpair(self, rest):
        m = kernRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        leftchar, rightchar, value = things
        value = int(value)
        self._kerning[(leftchar, rightchar)] = value

    def parseattr(self, word, rest):
        if word == "FontBBox":
            l, b, r, t = [int(thing) for thing in rest.split()]
            self._attrs[word] = l, b, r, t
        elif word == "Comment":
            self._comments.append(rest)
        else:
            try:
                value = int(rest)
            except (ValueError, OverflowError):
                self._attrs[word] = rest
            else:
                self._attrs[word] = value

    def parsecomposite(self, rest):
        m = compositeRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        charname = m.group(1)
        ncomponents = int(m.group(2))
        rest = rest[m.regs[0][1] :]
        components = []
        while True:
            m = componentRE.match(rest)
            if m is None:
                raise error("syntax error in AFM file: " + repr(rest))
            basechar = m.group(1)
            xoffset = int(m.group(2))
            yoffset = int(m.group(3))
            components.append((basechar, xoffset, yoffset))
            rest = rest[m.regs[0][1] :]
            if not rest:
                break
        assert len(components) == ncomponents
        self._composites[charname] = components

    def write(self, path, sep="\r"):
        """Writes out an AFM font to the given path."""
        import time

        lines = [
            "StartFontMetrics 2.0",
            "Comment Generated by afmLib; at %s"
            % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
        ]

        # write comments, assuming (possibly wrongly!) they should
        # all appear at the top
        for comment in self._comments:
            lines.append("Comment " + comment)

        # write attributes, first the ones we know about, in
        # a preferred order
        attrs = self._attrs
        for attr in preferredAttributeOrder:
            if attr in attrs:
                value = attrs[attr]
                if attr == "FontBBox":
                    value = "%s %s %s %s" % value
                lines.append(attr + " " + str(value))

        # then write the attributes we don't know about,
        # in alphabetical order
        items = sorted(attrs.items())
        for attr, value in items:
            if attr in preferredAttributeOrder:
                continue
            lines.append(attr + " " + str(value))

        # write char metrics
        lines.append("StartCharMetrics " + repr(len(self._chars)))
        items = [
            (charnum, (charname, width, box))
            for charname, (charnum, width, box) in self._chars.items()
        ]

        def myKey(a):
            """Custom key function to make sure unencoded chars (-1)
            end up at the end of the list after sorting."""
            if a[0] == -1:
                a = (0xFFFF,) + a[1:]  # 0xffff is an arbitrary large number
            return a

        items.sort(key=myKey)

        for charnum, (charname, width, (l, b, r, t)) in items:
            lines.append(
                "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
                % (charnum, width, charname, l, b, r, t)
            )
        lines.append("EndCharMetrics")

        # write kerning info
        lines.append("StartKernData")
        lines.append("StartKernPairs " + repr(len(self._kerning)))
        items = sorted(self._kerning.items())
        for (leftchar, rightchar), value in items:
            lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
        lines.append("EndKernPairs")
        lines.append("EndKernData")

        if self._composites:
            composites = sorted(self._composites.items())
            lines.append("StartComposites %s" % len(self._composites))
            for charname, components in composites:
                line = "CC %s %s ;" % (charname, len(components))
                for basechar, xoffset, yoffset in components:
                    line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
                lines.append(line)
            lines.append("EndComposites")

        lines.append("EndFontMetrics")

        writelines(path, lines, sep)

    def has_kernpair(self, pair):
        """Returns `True` if the given glyph pair (specified as a tuple) exists
        in the kerning dictionary."""
        return pair in self._kerning

    def kernpairs(self):
        """Returns a list of all kern pairs in the kerning dictionary."""
        return list(self._kerning.keys())

    def has_char(self, char):
        """Returns `True` if the given glyph exists in the font."""
        return char in self._chars

    def chars(self):
        """Returns a list of all glyph names in the font."""
        return list(self._chars.keys())

    def comments(self):
        """Returns all comments from the file."""
        return self._comments

    def addComment(self, comment):
        """Adds a new comment to the file."""
        self._comments.append(comment)

    def addComposite(self, glyphName, components):
        """Specifies that the glyph `glyphName` is made up of the given components.
        The components list should be of the following form::

            [
                (glyphname, xOffset, yOffset),
                ...
            ]

        """
        self._composites[glyphName] = components

    def __getattr__(self, attr):
        if attr in self._attrs:
            return self._attrs[attr]
        else:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # all attrs *not* starting with "_" are consider to be AFM keywords
        if attr[:1] == "_":
            self.__dict__[attr] = value
        else:
            self._attrs[attr] = value

    def __delattr__(self, attr):
        # all attrs *not* starting with "_" are consider to be AFM keywords
        if attr[:1] == "_":
            try:
                del self.__dict__[attr]
            except KeyError:
                raise AttributeError(attr)
        else:
            try:
                del self._attrs[attr]
            except KeyError:
                raise AttributeError(attr)

    def __getitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, return the kernpair
            return self._kerning[key]
        else:
            # return the metrics instead
            return self._chars[key]

    def __setitem__(self, key, value):
        if isinstance(key, tuple):
            # key is a tuple, set kernpair
            self._kerning[key] = value
        else:
            # set char metrics
            self._chars[key] = value

    def __delitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, del kernpair
            del self._kerning[key]
        else:
            # del char metrics
            del self._chars[key]

    def __repr__(self):
        if hasattr(self, "FullName"):
            return "<AFM object for %s>" % self.FullName
        else:
            return "<AFM object at %x>" % id(self)


def readlines(path):
    with open(path, "r", encoding="ascii") as f:
        data = f.read()
    return data.splitlines()


def writelines(path, lines, sep="\r"):
    with open(path, "w", encoding="ascii", newline=sep) as f:
        f.write("\n".join(lines) + "\n")


if __name__ == "__main__":
    import EasyDialogs

    path = EasyDialogs.AskFileForOpen()
    if path:
        afm = AFM(path)
        char = "A"
        if afm.has_char(char):
            print(afm[char])  # print charnum, width and boundingbox
        pair = ("A", "V")
        if afm.has_kernpair(pair):
            print(afm[pair])  # print kerning value for pair
        print(afm.Version)  # various other afm entries have become attributes
        print(afm.Weight)
        # afm.comments() returns a list of all Comment lines found in the AFM
        print(afm.comments())
        # print afm.chars()
        # print afm.kernpairs()
        print(afm)
        afm.write(path + ".muck")
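A quick usage sketch of the AFM reader shown above, mirroring the demo block at the end of the module; the file path is an assumption, any AFM file works:

from fontTools.afmLib import AFM

afm = AFM("Tests/afmLib/data/TestAFM.afm")  # assumed path to some AFM file
if afm.has_char("A"):
    print(afm["A"])  # (charnum, width, (l, b, r, t)) for glyph "A"
if afm.has_kernpair(("A", "V")):
    print(afm[("A", "V")])  # kerning value for the pair
print(afm.FontName)  # top-level AFM keywords are exposed as attributes
print(afm.comments())  # all Comment lines found in the file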


@@ -5059,174 +5059,175 @@ _aglfnText = """\


class AGLError(Exception):
    pass


LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}


def _builddicts():
    import re

    lines = _aglText.splitlines()

    parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")

    for line in lines:
        if not line or line[:1] == "#":
            continue
        m = parseAGL_RE.match(line)
        if not m:
            raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
        unicodes = m.group(2)
        assert len(unicodes) % 5 == 4
        unicodes = [int(unicode, 16) for unicode in unicodes.split()]
        glyphName = tostr(m.group(1))
        LEGACY_AGL2UV[glyphName] = unicodes

    lines = _aglfnText.splitlines()

    parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")

    for line in lines:
        if not line or line[:1] == "#":
            continue
        m = parseAGLFN_RE.match(line)
        if not m:
            raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
        unicode = m.group(1)
        assert len(unicode) == 4
        unicode = int(unicode, 16)
        glyphName = tostr(m.group(2))
        AGL2UV[glyphName] = unicode
        UV2AGL[unicode] = glyphName


_builddicts()


def toUnicode(glyph, isZapfDingbats=False):
    """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``

    If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
    glyph names (as required by the AGL specification).
    """
    # https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    #
    # 1. Drop all the characters from the glyph name starting with
    #    the first occurrence of a period (U+002E; FULL STOP), if any.
    glyph = glyph.split(".", 1)[0]

    # 2. Split the remaining string into a sequence of components,
    #    using underscore (U+005F; LOW LINE) as the delimiter.
    components = glyph.split("_")

    # 3. Map each component to a character string according to the
    #    procedure below, and concatenate those strings; the result
    #    is the character string to which the glyph name is mapped.
    result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
    return "".join(result)


def _glyphComponentToUnicode(component, isZapfDingbats):
    # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
    # and the component is in the ITC Zapf Dingbats Glyph List, then
    # map it to the corresponding character in that list.
    dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
    if dingbat:
        return dingbat

    # Otherwise, if the component is in AGL, then map it
    # to the corresponding character in that list.
    uchars = LEGACY_AGL2UV.get(component)
    if uchars:
        return "".join(map(chr, uchars))

    # Otherwise, if the component is of the form "uni" (U+0075,
    # U+006E, and U+0069) followed by a sequence of uppercase
    # hexadecimal digits (0-9 and A-F, meaning U+0030 through
    # U+0039 and U+0041 through U+0046), if the length of that
    # sequence is a multiple of four, and if each group of four
    # digits represents a value in the ranges 0000 through D7FF
    # or E000 through FFFF, then interpret each as a Unicode scalar
    # value and map the component to the string made of those
    # scalar values. Note that the range and digit-length
    # restrictions mean that the "uni" glyph name prefix can be
    # used only with UVs in the Basic Multilingual Plane (BMP).
    uni = _uniToUnicode(component)
    if uni:
        return uni

    # Otherwise, if the component is of the form "u" (U+0075)
    # followed by a sequence of four to six uppercase hexadecimal
    # digits (0-9 and A-F, meaning U+0030 through U+0039 and
    # U+0041 through U+0046), and those digits represents a value
    # in the ranges 0000 through D7FF or E000 through 10FFFF, then
    # interpret it as a Unicode scalar value and map the component
    # to the string made of this scalar value.
    uni = _uToUnicode(component)
    if uni:
        return uni

    # Otherwise, map the component to an empty string.
    return ""


# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
_AGL_ZAPF_DINGBATS = (
    " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
    "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
    "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
    "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
)


def _zapfDingbatsToUnicode(glyph):
    """Helper for toUnicode()."""
    if len(glyph) < 2 or glyph[0] != "a":
        return None
    try:
        gid = int(glyph[1:])
    except ValueError:
        return None
    if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
        return None
    uchar = _AGL_ZAPF_DINGBATS[gid]
    return uchar if uchar != " " else None


_re_uni = re.compile("^uni([0-9A-F]+)$")


def _uniToUnicode(component):
    """Helper for toUnicode() to handle "uniABCD" components."""
    match = _re_uni.match(component)
    if match is None:
        return None
    digits = match.group(1)
    if len(digits) % 4 != 0:
        return None
    chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
    if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
        # The AGL specification explicitly excluded surrogate pairs.
        return None
    return "".join([chr(c) for c in chars])


_re_u = re.compile("^u([0-9A-F]{4,6})$")


def _uToUnicode(component):
    """Helper for toUnicode() to handle "u1ABCD" components."""
    match = _re_u.match(component)
    if match is None:
        return None
    digits = match.group(1)
    try:
        value = int(digits, 16)
    except ValueError:
        return None
    if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
        return chr(value)
    return None
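The dictionaries built above back fontTools.agl's public helpers; a small example of how they are typically used (output comments are illustrative):

from fontTools import agl

print(agl.toUnicode("Aacute"))           # "Á", via the legacy AGL mapping
print(agl.toUnicode("uni0041_uni0042"))  # "AB", from "uni" + BMP hex codes
print(agl.toUnicode("a9", isZapfDingbats=True))  # a Zapf Dingbats glyph
print(agl.UV2AGL[0x0041])                # "A", reverse AGLFN lookup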

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -14,170 +14,196 @@ from functools import reduce


class missingdict(dict):
    def __init__(self, missing_func):
        self.missing_func = missing_func

    def __missing__(self, v):
        return self.missing_func(v)


def cumSum(f, op=add, start=0, decreasing=False):

    keys = sorted(f.keys())
    minx, maxx = keys[0], keys[-1]

    total = reduce(op, f.values(), start)

    if decreasing:
        missing = lambda x: start if x > maxx else total
        domain = range(maxx, minx - 1, -1)
    else:
        missing = lambda x: start if x < minx else total
        domain = range(minx, maxx + 1)

    out = missingdict(missing)

    v = start
    for x in domain:
        v = op(v, f[x])
        out[x] = v

    return out


def byteCost(widths, default, nominal):

    if not hasattr(widths, "items"):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    cost = 0
    for w, freq in widths.items():
        if w == default:
            continue
        diff = abs(w - nominal)
        if diff <= 107:
            cost += freq
        elif diff <= 1131:
            cost += freq * 2
        else:
            cost += freq * 5

    return cost


def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""

    d = defaultdict(int)
    for w in widths:
        d[w] += 1

    # Maximum number of bytes using default can possibly save
    maxDefaultAdvantage = 5 * max(d.values())

    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))

    bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)

    bestCost = len(widths) * 5 + 1
    for nominal in domain:
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal

    return bestDefault, bestNominal


def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.

    This algorithm is linear in UPEM+numGlyphs."""

    if not hasattr(widths, "items"):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))

    # Cumulative sum/max forward/backward.
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)

    # Cost per nominal choice, without default consideration.
    nomnCostU = missingdict(
        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
    )
    nomnCostD = missingdict(
        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
    )
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])

    # Cost-saving per nominal choice, by best default choice.
    dfltCostU = missingdict(
        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
    )
    dfltCostD = missingdict(
        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
    )
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))

    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])

    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])

    # Work back the best default.
    bestC = bestCost[nominal]
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        starts = [nominal, nominal - 108, nominal - 1132]
        for start in starts:
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        starts = [nominal, nominal + 108, nominal + 1132]
        for start in starts:
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))

    return default, nominal


def main(args=None):
    """Calculate optimum defaultWidthX/nominalWidthX values"""

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.width",
        description=main.__doc__,
    )
    parser.add_argument(
        "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
    )
    parser.add_argument(
        "-b",
        "--brute-force",
        dest="brute",
        action="store_true",
        help="Use brute-force approach (VERY slow)",
    )

    args = parser.parse_args(args)

    for fontfile in args.inputs:
        font = TTFont(fontfile)
        hmtx = font["hmtx"]
        widths = [m[0] for m in hmtx.metrics.values()]
        if args.brute:
            default, nominal = optimizeWidthsBruteforce(widths)
        else:
            default, nominal = optimizeWidths(widths)
        print(
            "glyphs=%d default=%d nominal=%d byteCost=%d"
            % (len(widths), default, nominal, byteCost(widths, default, nominal))
        )


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        import doctest

        sys.exit(doctest.testmod().failed)
    main()
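A short example of calling the optimizer above; the width histogram is made up, but the call shape matches the docstring (widths may be a list of widths or a {width: count} dict):

from fontTools.cffLib.width import byteCost, optimizeWidths

widths = {500: 300, 600: 150, 250: 10, 0: 40}  # hypothetical {advance width: glyph count}
default, nominal = optimizeWidths(widths)
print(default, nominal, byteCost(widths, default, nominal))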


@@ -1,3 +1,2 @@
 class ColorLibError(Exception):
     pass


@@ -67,9 +67,7 @@ def _split_format(cls, source):
    assert isinstance(
        fmt, collections.abc.Hashable
    ), f"{cls} Format is not hashable: {fmt!r}"
    assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
    return fmt, remainder


@@ -4,46 +4,52 @@ from .cu2qu import *
import random
import timeit

-MAX_ERR = 5
+MAX_ERR = 0.05


def generate_curve():
    return [
        tuple(float(random.randint(0, 2048)) for coord in range(2))
        for point in range(4)
    ]


def setup_curve_to_quadratic():
    return generate_curve(), MAX_ERR


def setup_curves_to_quadratic():
    num_curves = 3
    return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)


-def run_benchmark(benchmark_module, module, function, setup_suffix='', repeat=5, number=1000):
+def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
    setup_func = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_func += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    def wrapper(function, setup_func):
        function = globals()[function]
        setup_func = globals()[setup_func]

        def wrapped():
            return function(*setup_func())

        return wrapped

    results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))


def main():
    """Benchmark the cu2qu algorithm performance."""
-    run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
-    run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
+    run_benchmark("cu2qu", "curve_to_quadratic")
+    run_benchmark("cu2qu", "curves_to_quadratic")


if __name__ == "__main__":
    random.seed(1)
    main()
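The functions being benchmarked come from fontTools.cu2qu; a minimal sketch of a direct call, with made-up control points (max_err is in the same units as the coordinates):

from fontTools.cu2qu import curve_to_quadratic

cubic = [(0.0, 0.0), (100.0, 300.0), (400.0, 300.0), (500.0, 0.0)]  # assumed cubic Bezier
spline = curve_to_quadratic(cubic, 1.0)  # allow up to 1 unit of approximation error
print(len(spline), spline[0], spline[-1])  # quadratic spline points; endpoints preserved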


@@ -37,7 +37,7 @@ def open_ufo(path):

def _font_to_quadratic(input_path, output_path=None, **kwargs):
    ufo = open_ufo(input_path)
    logger.info("Converting curves for %s", input_path)
    if font_to_quadratic(ufo, **kwargs):
        logger.info("Saving %s", output_path)
        if output_path:
@@ -67,13 +67,13 @@ def _copytree(input_path, output_path):

def main(args=None):
    """Convert a UFO font from cubic to quadratic curves"""
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
@@ -81,19 +81,28 @@ def main(args=None):
        type=float,
        metavar="ERROR",
        default=None,
        help="maxiumum approximation error measured in EM (default: 0.001)",
    )
+    parser.add_argument(
+        "-m",
+        "--mixed",
+        default=False,
+        action="store_true",
+        help="whether to used mixed quadratic and cubic curves",
+    )
    parser.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="do not reverse the contour direction",
    )

    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility",
    )
    mode_parser.add_argument(
        "-j",
@@ -103,7 +112,8 @@ def main(args=None):
        default=1,
        const=_cpu_count(),
        metavar="N",
        help="Convert using N multiple processes (default: %(default)s)",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
@@ -111,14 +121,18 @@ def main(args=None):
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=(
            "output filename for the converted UFO. By default fonts are "
            "modified in place. This only works with a single input."
        ),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs",
    )

    options = parser.parse_args(args)
@@ -143,8 +157,7 @@ def main(args=None):
    elif not os.path.isdir(output_dir):
        parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
@@ -152,12 +165,15 @@ def main(args=None):
        # save in-place
        output_paths = [None] * len(options.infiles)

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        reverse_direction=options.reverse_direction,
+       all_quadratic=False if options.mixed else True,
    )

    if options.interpolatable:
        logger.info("Converting curves compatibly")
        ufos = [open_ufo(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, **kwargs):
            for ufo, output_path in zip(ufos, output_paths):
@@ -171,11 +187,10 @@ def main(args=None):
            if output_path:
                _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
options.jobs) if options.jobs > 1 else 1
if jobs > 1: if jobs > 1:
func = partial(_font_to_quadratic, **kwargs) func = partial(_font_to_quadratic, **kwargs)
logger.info('Running %d parallel processes', jobs) logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool: with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths)) pool.starmap(func, zip(options.infiles, output_paths))
else: else:
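The new --mixed flag simply passes all_quadratic=False down to fonts_to_quadratic, so segments may stay cubic when that is more compact within the error budget. A hedged sketch of driving the command programmatically (the UFO filename is hypothetical, and a UFO library such as ufoLib2 must be installed):

from fontTools.cu2qu.cli import main

# convert a UFO in place, keeping cubic segments where they already fit the tolerance
main(["--mixed", "-e", "0.001", "MyFont.ufo"])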

View File

@ -1,5 +1,5 @@
#cython: language_level=3 # cython: language_level=3
#distutils: define_macros=CYTHON_TRACE_NOGIL=1 # distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved. # Copyright 2015 Google Inc. All Rights Reserved.
# #
@ -17,30 +17,26 @@
try: try:
import cython import cython
except ImportError:
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types # if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython from fontTools.misc import cython
COMPILED = False
import math import math
from .errors import Error as Cu2QuError, ApproxNotFoundError from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ['curve_to_quadratic', 'curves_to_quadratic'] __all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100 MAX_N = 100
NAN = float("NaN") NAN = float("NaN")
if cython.compiled:
# Yep, I'm compiled.
COMPILED = True
else:
# Just a lowly interpreted script.
COMPILED = False
@cython.cfunc @cython.cfunc
@cython.inline @cython.inline
@cython.returns(cython.double) @cython.returns(cython.double)
@ -61,7 +57,9 @@ def dot(v1, v2):
@cython.cfunc @cython.cfunc
@cython.inline @cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex) @cython.locals(
_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d): def calc_cubic_points(a, b, c, d):
_1 = d _1 = d
_2 = (c / 3.0) + d _2 = (c / 3.0) + d
@ -72,7 +70,9 @@ def calc_cubic_points(a, b, c, d):
@cython.cfunc @cython.cfunc
@cython.inline @cython.inline
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) @cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3): def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0 c = (p1 - p0) * 3.0
@ -83,7 +83,9 @@ def calc_cubic_parameters(p0, p1, p2, p3):
@cython.cfunc @cython.cfunc
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) @cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n): def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts. """Split a cubic Bezier into n equal parts.
@ -112,13 +114,23 @@ def split_cubic_into_n_iter(p0, p1, p2, p3, n):
a, b = split_cubic_into_two(p0, p1, p2, p3) a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(split_cubic_into_three(*a) + split_cubic_into_three(*b)) return iter(split_cubic_into_three(*a) + split_cubic_into_three(*b))
return _split_cubic_into_n_gen(p0,p1,p2,p3,n) return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int) @cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int) @cython.locals(
@cython.locals(a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex) dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n): def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n dt = 1 / n
@ -129,13 +141,15 @@ def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
t1_2 = t1 * t1 t1_2 = t1 * t1
# calc new a, b, c and d # calc new a, b, c and d
a1 = a * delta_3 a1 = a * delta_3
b1 = (3*a*t1 + b) * delta_2 b1 = (3 * a * t1 + b) * delta_2
c1 = (2*b*t1 + c + 3*a*t1_2) * dt c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
d1 = a*t1*t1_2 + b*t1_2 + c*t1 + d d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
yield calc_cubic_points(a1, b1, c1, d1) yield calc_cubic_points(a1, b1, c1, d1)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) @cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex) @cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3): def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts. """Split a cubic Bezier into two equal parts.
@ -152,15 +166,28 @@ def split_cubic_into_two(p0, p1, p2, p3):
tuple: Two cubic Beziers (each expressed as a tuple of four complex tuple: Two cubic Beziers (each expressed as a tuple of four complex
values). values).
""" """
mid = (p0 + 3 * (p1 + p2) + p3) * .125 mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
deriv3 = (p3 + p2 - p1 - p0) * .125 deriv3 = (p3 + p2 - p1 - p0) * 0.125
return ((p0, (p0 + p1) * .5, mid - deriv3, mid), return (
(mid, mid + deriv3, (p2 + p3) * .5, p3)) (p0, (p0 + p1) * 0.5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
)
@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double) @cython.locals(
@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex) p0=cython.complex,
def split_cubic_into_three(p0, p1, p2, p3, _27=1/27): p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
_27=cython.double,
)
@cython.locals(
mid1=cython.complex,
deriv1=cython.complex,
mid2=cython.complex,
deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3, _27=1 / 27):
"""Split a cubic Bezier into three equal parts. """Split a cubic Bezier into three equal parts.
Splits the curve into three equal parts at t = 1/3 and t = 2/3 Splits the curve into three equal parts at t = 1/3 and t = 2/3
@ -177,17 +204,25 @@ def split_cubic_into_three(p0, p1, p2, p3, _27=1/27):
""" """
# we define 1/27 as a keyword argument so that it will be evaluated only # we define 1/27 as a keyword argument so that it will be evaluated only
# once but still in the scope of this function # once but still in the scope of this function
mid1 = (8*p0 + 12*p1 + 6*p2 + p3) * _27 mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * _27
deriv1 = (p3 + 3*p2 - 4*p0) * _27 deriv1 = (p3 + 3 * p2 - 4 * p0) * _27
mid2 = (p0 + 6*p1 + 12*p2 + 8*p3) * _27 mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * _27
deriv2 = (4*p3 - 3*p1 - p0) * _27 deriv2 = (4 * p3 - 3 * p1 - p0) * _27
return ((p0, (2*p0 + p1) / 3.0, mid1 - deriv1, mid1), return (
(mid1, mid1 + deriv1, mid2 - deriv2, mid2), (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid2, mid2 + deriv2, (p2 + 2*p3) / 3.0, p3)) (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
)
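For the record, the constants in the two split functions above are just the cubic evaluated at the split parameters, B(1/2) = (p0 + 3(p1 + p2) + p3)/8 and B(1/3) = (8 p0 + 12 p1 + 6 p2 + p3)/27, with the deriv terms being the tangent at the split point scaled to the sub-curve's handle length. A quick numeric check (sample points are arbitrary):

def cubic_point(t, p0, p1, p2, p3):
    # direct evaluation of a cubic Bezier in the same complex representation used above
    s = 1 - t
    return s * s * s * p0 + 3 * s * s * t * p1 + 3 * s * t * t * p2 + t * t * t * p3

p0, p1, p2, p3 = 0 + 0j, 1 + 2j, 3 + 2j, 4 + 0j  # arbitrary sample points
assert abs(cubic_point(0.5, p0, p1, p2, p3) - (p0 + 3 * (p1 + p2) + p3) * 0.125) < 1e-12
assert abs(cubic_point(1 / 3, p0, p1, p2, p3) - (8 * p0 + 12 * p1 + 6 * p2 + p3) / 27) < 1e-12
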
@cython.returns(cython.complex) @cython.returns(cython.complex)
@cython.locals(t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) @cython.locals(
t=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex) @cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3): def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one. """Approximate a cubic Bezier using a quadratic one.
@ -235,7 +270,13 @@ def calc_intersect(a, b, c, d):
@cython.cfunc @cython.cfunc
@cython.returns(cython.int) @cython.returns(cython.int)
@cython.locals(tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) @cython.locals(
tolerance=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex) @cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance): def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin. """Check if a cubic Bezier lies within a given distance of the origin.
@ -260,18 +301,25 @@ def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
return True return True
# Split. # Split.
mid = (p0 + 3 * (p1 + p2) + p3) * .125 mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance: if abs(mid) > tolerance:
return False return False
deriv3 = (p3 + p2 - p1 - p0) * .125 deriv3 = (p3 + p2 - p1 - p0) * 0.125
return (cubic_farthest_fit_inside(p0, (p0+p1)*.5, mid-deriv3, mid, tolerance) and return cubic_farthest_fit_inside(
cubic_farthest_fit_inside(mid, mid+deriv3, (p2+p3)*.5, p3, tolerance)) p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc @cython.cfunc
@cython.locals(tolerance=cython.double, _2_3=cython.double) @cython.locals(tolerance=cython.double, _2_3=cython.double)
@cython.locals(q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex) @cython.locals(
def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3): q1=cython.complex,
c0=cython.complex,
c1=cython.complex,
c2=cython.complex,
c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance, _2_3=2 / 3):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance. """Approximate a cubic Bezier with a single quadratic within a given tolerance.
Args: Args:
@ -294,10 +342,7 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
c3 = cubic[3] c3 = cubic[3]
c1 = c0 + (q1 - c0) * _2_3 c1 = c0 + (q1 - c0) * _2_3
c2 = c3 + (q1 - c3) * _2_3 c2 = c3 + (q1 - c3) * _2_3
if not cubic_farthest_fit_inside(0, if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
c1 - cubic[1],
c2 - cubic[2],
0, tolerance):
return None return None
return c0, q1, c3 return c0, q1, c3
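The 2/3 factor here is ordinary degree elevation: the quadratic (c0, q1, c3) is exactly the cubic whose inner handles are c0 + 2/3 (q1 - c0) and c3 + 2/3 (q1 - c3), so the difference between those handles and the original cubic's handles is what cubic_farthest_fit_inside bounds. A tiny check using the midpoint formula noted earlier (sample points are arbitrary):

q0, q1, q2 = 0 + 0j, 2 + 3j, 4 + 0j  # arbitrary quadratic
c1 = q0 + (q1 - q0) * (2 / 3)
c2 = q2 + (q1 - q2) * (2 / 3)
# on-curve midpoints agree: quadratic at t=1/2 vs the degree-elevated cubic at t=1/2
assert abs((q0 + 2 * q1 + q2) * 0.25 - (q0 + 3 * (c1 + c2) + q2) * 0.125) < 1e-12
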
@ -305,9 +350,17 @@ def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3):
@cython.cfunc @cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double) @cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double)
@cython.locals(i=cython.int) @cython.locals(i=cython.int)
@cython.locals(c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex) @cython.locals(
@cython.locals(q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex) c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3): )
@cython.locals(
q0=cython.complex,
q1=cython.complex,
next_q1=cython.complex,
q2=cython.complex,
d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic, _2_3=2 / 3):
"""Approximate a cubic Bezier curve with a spline of n quadratics. """Approximate a cubic Bezier curve with a spline of n quadratics.
Args: Args:
@ -326,6 +379,8 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
if n == 1: if n == 1:
return cubic_approx_quadratic(cubic, tolerance) return cubic_approx_quadratic(cubic, tolerance)
if n == 2 and all_quadratic == False:
return cubic
cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)
@ -335,7 +390,7 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
q2 = cubic[0] q2 = cubic[0]
d1 = 0j d1 = 0j
spline = [cubic[0], next_q1] spline = [cubic[0], next_q1]
for i in range(1, n+1): for i in range(1, n + 1):
# Current cubic to convert # Current cubic to convert
c0, c1, c2, c3 = next_cubic c0, c1, c2, c3 = next_cubic
@ -345,9 +400,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
q1 = next_q1 q1 = next_q1
if i < n: if i < n:
next_cubic = next(cubics) next_cubic = next(cubics)
next_q1 = cubic_approx_control(i / (n-1), *next_cubic) next_q1 = cubic_approx_control(i / (n - 1), *next_cubic)
spline.append(next_q1) spline.append(next_q1)
q2 = (q1 + next_q1) * .5 q2 = (q1 + next_q1) * 0.5
else: else:
q2 = c3 q2 = c3
@ -355,12 +410,9 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
d0 = d1 d0 = d1
d1 = q2 - c3 d1 = q2 - c3
if (abs(d1) > tolerance or if abs(d1) > tolerance or not cubic_farthest_fit_inside(
not cubic_farthest_fit_inside(d0, d0, q0 + (q1 - q0) * _2_3 - c1, q2 + (q1 - q2) * _2_3 - c2, d1, tolerance
q0 + (q1 - q0) * _2_3 - c1, ):
q2 + (q1 - q2) * _2_3 - c2,
d1,
tolerance)):
return None return None
spline.append(cubic[3]) spline.append(cubic[3])
@ -369,24 +421,31 @@ def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3):
@cython.locals(max_err=cython.double) @cython.locals(max_err=cython.double)
@cython.locals(n=cython.int) @cython.locals(n=cython.int)
def curve_to_quadratic(curve, max_err): def curve_to_quadratic(curve, max_err, all_quadratic=True):
"""Approximate a cubic Bezier curve with a spline of n quadratics. """Approximate a cubic Bezier curve with a spline of n quadratics.
Args: Args:
cubic (sequence): Four 2D tuples representing control points of cubic (sequence): Four 2D tuples representing control points of
the cubic Bezier curve. the cubic Bezier curve.
max_err (double): Permitted deviation from the original curve. max_err (double): Permitted deviation from the original curve.
all_quadratic (bool): If True (default) returned value is a
quadratic spline. If False, it's either a single quadratic
curve or a single cubic curve.
Returns: Returns:
A list of 2D tuples, representing control points of the quadratic If all_quadratic is True: A list of 2D tuples, representing
spline if it fits within the given tolerance, or ``None`` if no control points of the quadratic spline if it fits within the
suitable spline could be calculated. given tolerance, or ``None`` if no suitable spline could be
calculated.
If all_quadratic is False: Either a quadratic curve (if length
of output is 3), or a cubic curve (if length of output is 4).
""" """
curve = [complex(*p) for p in curve] curve = [complex(*p) for p in curve]
for n in range(1, MAX_N + 1): for n in range(1, MAX_N + 1):
spline = cubic_approx_spline(curve, n, max_err) spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
if spline is not None: if spline is not None:
# done. go home # done. go home
return [(s.real, s.imag) for s in spline] return [(s.real, s.imag) for s in spline]
@ -394,9 +453,8 @@ def curve_to_quadratic(curve, max_err):
raise ApproxNotFoundError(curve) raise ApproxNotFoundError(curve)
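A minimal usage sketch of the new signature (points and tolerance are arbitrary; assumes the helper is importable from fontTools.cu2qu as usual):

from fontTools.cu2qu import curve_to_quadratic

cubic = [(0, 0), (30, 100), (120, 100), (150, 0)]
spline = curve_to_quadratic(cubic, 1.0)                      # always a quadratic spline
mixed = curve_to_quadratic(cubic, 1.0, all_quadratic=False)  # single quadratic, or the cubic itself
print(len(spline), len(mixed))
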
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int) @cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
def curves_to_quadratic(curves, max_errors): def curves_to_quadratic(curves, max_errors, all_quadratic=True):
"""Return quadratic Bezier splines approximating the input cubic Beziers. """Return quadratic Bezier splines approximating the input cubic Beziers.
Args: Args:
@ -404,6 +462,9 @@ def curves_to_quadratic(curves, max_errors):
2D tuples. 2D tuples.
max_errors: A sequence of *n* floats representing the maximum permissible max_errors: A sequence of *n* floats representing the maximum permissible
deviation from each of the cubic Bezier curves. deviation from each of the cubic Bezier curves.
all_quadratic (bool): If True (default) returned values are a
quadratic spline. If False, they are either a single quadratic
curve or a single cubic curve.
Example:: Example::
@ -419,7 +480,11 @@ def curves_to_quadratic(curves, max_errors):
( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...). ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).
Returns: Returns:
A list of splines, each spline being a list of 2D tuples. If all_quadratic is True, a list of splines, each spline being a list
of 2D tuples.
If all_quadratic is False, a list of curves, each curve being a quadratic
(length 3), or cubic (length 4).
Raises: Raises:
fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
@ -434,7 +499,7 @@ def curves_to_quadratic(curves, max_errors):
last_i = i = 0 last_i = i = 0
n = 1 n = 1
while True: while True:
spline = cubic_approx_spline(curves[i], n, max_errors[i]) spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
if spline is None: if spline is None:
if n == MAX_N: if n == MAX_N:
break break
@ -448,5 +513,3 @@ def curves_to_quadratic(curves, max_errors):
return [[(s.real, s.imag) for s in spline] for spline in splines] return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves) raise ApproxNotFoundError(curves)
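And the interpolation-compatible variant, which converts several sources at once so every returned spline has the same number of points (sample curves and tolerances are arbitrary):

from fontTools.cu2qu import curves_to_quadratic

curve1 = [(50, 50), (100, 100), (150, 100), (200, 50)]
curve2 = [(75, 120), (100, 120), (150, 120), (200, 120)]
splines = curves_to_quadratic([curve1, curve2], [1.0, 1.0])
assert len(splines[0]) == len(splines[1])  # same structure, so the results still interpolate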

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
class Error(Exception): class Error(Exception):
"""Base Cu2Qu exception class for all other errors.""" """Base Cu2Qu exception class for all other errors."""

View File

@ -30,12 +30,15 @@ from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic from . import curves_to_quadratic
from .errors import ( from .errors import (
UnequalZipLengthsError, IncompatibleSegmentNumberError, UnequalZipLengthsError,
IncompatibleSegmentTypesError, IncompatibleGlyphsError, IncompatibleSegmentNumberError,
IncompatibleFontsError) IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ['fonts_to_quadratic', 'font_to_quadratic'] __all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square). # The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM # Later on, we convert it to absolute font units by multiplying it by a font's UPEM
@ -47,6 +50,8 @@ logger = logging.getLogger(__name__)
_zip = zip _zip = zip
def zip(*args): def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is """Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility. returned for python 2/3 compatibility.
@ -69,27 +74,27 @@ class GetSegmentsPen(AbstractPen):
self.segments = [] self.segments = []
def _add_segment(self, tag, *args): def _add_segment(self, tag, *args):
if tag in ['move', 'line', 'qcurve', 'curve']: if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1] self._last_pt = args[-1]
self.segments.append((tag, args)) self.segments.append((tag, args))
def moveTo(self, pt): def moveTo(self, pt):
self._add_segment('move', pt) self._add_segment("move", pt)
def lineTo(self, pt): def lineTo(self, pt):
self._add_segment('line', pt) self._add_segment("line", pt)
def qCurveTo(self, *points): def qCurveTo(self, *points):
self._add_segment('qcurve', self._last_pt, *points) self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points): def curveTo(self, *points):
self._add_segment('curve', self._last_pt, *points) self._add_segment("curve", self._last_pt, *points)
def closePath(self): def closePath(self):
self._add_segment('close') self._add_segment("close")
def endPath(self): def endPath(self):
self._add_segment('end') self._add_segment("end")
def addComponent(self, glyphName, transformation): def addComponent(self, glyphName, transformation):
pass pass
@ -122,38 +127,41 @@ def _set_segments(glyph, segments, reverse_direction):
if reverse_direction: if reverse_direction:
pen = ReverseContourPen(pen) pen = ReverseContourPen(pen)
for tag, args in segments: for tag, args in segments:
if tag == 'move': if tag == "move":
pen.moveTo(*args) pen.moveTo(*args)
elif tag == 'line': elif tag == "line":
pen.lineTo(*args) pen.lineTo(*args)
elif tag == 'curve': elif tag == "curve":
pen.curveTo(*args[1:]) pen.curveTo(*args[1:])
elif tag == 'qcurve': elif tag == "qcurve":
pen.qCurveTo(*args[1:]) pen.qCurveTo(*args[1:])
elif tag == 'close': elif tag == "close":
pen.closePath() pen.closePath()
elif tag == 'end': elif tag == "end":
pen.endPath() pen.endPath()
else: else:
raise AssertionError('Unhandled segment type "%s"' % tag) raise AssertionError('Unhandled segment type "%s"' % tag)
def _segments_to_quadratic(segments, max_err, stats): def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
"""Return quadratic approximations of cubic segments.""" """Return quadratic approximations of cubic segments."""
assert all(s[0] == 'curve' for s in segments), 'Non-cubic given to convert' assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"
new_points = curves_to_quadratic([s[1] for s in segments], max_err) new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
n = len(new_points[0]) n = len(new_points[0])
assert all(len(s) == n for s in new_points[1:]), 'Converted incompatibly' assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"
spline_length = str(n - 2) spline_length = str(n - 2)
stats[spline_length] = stats.get(spline_length, 0) + 1 stats[spline_length] = stats.get(spline_length, 0) + 1
return [('qcurve', p) for p in new_points] if all_quadratic or n == 3:
return [("qcurve", p) for p in new_points]
else:
return [("curve", p) for p in new_points]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats): def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
"""Do the actual conversion of a set of compatible glyphs, after arguments """Do the actual conversion of a set of compatible glyphs, after arguments
have been set up. have been set up.
@ -176,9 +184,13 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
tag = segments[0][0] tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]): if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments] incompatible[i] = [s[0] for s in segments]
elif tag == 'curve': elif tag == "curve":
segments = _segments_to_quadratic(segments, max_err, stats) new_segments = _segments_to_quadratic(
glyphs_modified = True segments, max_err, stats, all_quadratic
)
if all_quadratic or new_segments != segments:
glyphs_modified = True
segments = new_segments
new_segments_by_location.append(segments) new_segments_by_location.append(segments)
if glyphs_modified: if glyphs_modified:
@ -192,7 +204,8 @@ def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats):
def glyphs_to_quadratic( def glyphs_to_quadratic(
glyphs, max_err=None, reverse_direction=False, stats=None): glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
"""Convert the curves of a set of compatible of glyphs to quadratic. """Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation All curves will be converted to quadratic at once, ensuring interpolation
@ -216,12 +229,21 @@ def glyphs_to_quadratic(
max_errors = [max_err] * len(glyphs) max_errors = [max_err] * len(glyphs)
assert len(max_errors) == len(glyphs) assert len(max_errors) == len(glyphs)
return _glyphs_to_quadratic(glyphs, max_errors, reverse_direction, stats) return _glyphs_to_quadratic(
glyphs, max_errors, reverse_direction, stats, all_quadratic
)
def fonts_to_quadratic( def fonts_to_quadratic(
fonts, max_err_em=None, max_err=None, reverse_direction=False, fonts,
stats=None, dump_stats=False, remember_curve_type=True): max_err_em=None,
max_err=None,
reverse_direction=False,
stats=None,
dump_stats=False,
remember_curve_type=True,
all_quadratic=True,
):
"""Convert the curves of a collection of fonts to quadratic. """Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation All curves will be converted to quadratic at once, ensuring interpolation
@ -243,7 +265,7 @@ def fonts_to_quadratic(
curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts} curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
if len(curve_types) == 1: if len(curve_types) == 1:
curve_type = next(iter(curve_types)) curve_type = next(iter(curve_types))
if curve_type == "quadratic": if curve_type in ("quadratic", "mixed"):
logger.info("Curves already converted to quadratic") logger.info("Curves already converted to quadratic")
return False return False
elif curve_type == "cubic": elif curve_type == "cubic":
@ -258,7 +280,7 @@ def fonts_to_quadratic(
stats = {} stats = {}
if max_err_em and max_err: if max_err_em and max_err:
raise TypeError('Only one of max_err and max_err_em can be specified.') raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err): if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR max_err_em = DEFAULT_MAX_ERR
@ -270,8 +292,7 @@ def fonts_to_quadratic(
if isinstance(max_err_em, (list, tuple)): if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em) assert len(fonts) == len(max_err_em)
max_errors = [f.info.unitsPerEm * e max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
for f, e in zip(fonts, max_err_em)]
elif max_err_em: elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts] max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
@ -286,7 +307,8 @@ def fonts_to_quadratic(
cur_max_errors.append(error) cur_max_errors.append(error)
try: try:
modified |= _glyphs_to_quadratic( modified |= _glyphs_to_quadratic(
glyphs, cur_max_errors, reverse_direction, stats) glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
)
except IncompatibleGlyphsError as exc: except IncompatibleGlyphsError as exc:
logger.error(exc) logger.error(exc)
glyph_errors[name] = exc glyph_errors[name] = exc
@ -296,14 +318,17 @@ def fonts_to_quadratic(
if modified and dump_stats: if modified and dump_stats:
spline_lengths = sorted(stats.keys()) spline_lengths = sorted(stats.keys())
logger.info('New spline lengths: %s' % (', '.join( logger.info(
'%s: %d' % (l, stats[l]) for l in spline_lengths))) "New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
)
if remember_curve_type: if remember_curve_type:
for font in fonts: for font in fonts:
curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic") curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
if curve_type != "quadratic": new_curve_type = "quadratic" if all_quadratic else "mixed"
font.lib[CURVE_TYPE_LIB_KEY] = "quadratic" if curve_type != new_curve_type:
font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
modified = True modified = True
return modified return modified
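A hedged sketch of driving the new option from Python rather than the CLI (assumes ufoLib2 is available and the UFO paths are hypothetical; any Font object with the defcon-like API that fonts_to_quadratic expects would do):

import ufoLib2
from fontTools.cu2qu.ufo import fonts_to_quadratic

paths = ["Regular.ufo", "Bold.ufo"]
fonts = [ufoLib2.Font.open(p) for p in paths]
# all_quadratic=False keeps a cubic wherever it beats a long quadratic spline;
# the fonts' lib key then records the curve type as "mixed" instead of "quadratic"
if fonts_to_quadratic(fonts, max_err_em=0.001, all_quadratic=False, dump_stats=True):
    for font, path in zip(fonts, paths):
        font.save(path)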

File diff suppressed because it is too large

View File

@ -1,36 +1,258 @@
MacRoman = [ MacRoman = [
'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', "NUL",
'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', "Eth",
'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', "eth",
'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', "Lslash",
'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', "lslash",
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', "Scaron",
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', "scaron",
'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', "Yacute",
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', "yacute",
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', "HT",
'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore', "LF",
'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', "Thorn",
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', "thorn",
'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', "CR",
'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', "Zcaron",
'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', "zcaron",
'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', "DLE",
'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', "DC1",
'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', "DC2",
'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', "DC3",
'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', "DC4",
'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', "onehalf",
'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', "onequarter",
'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', "onesuperior",
'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', "threequarters",
'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', "threesuperior",
'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', "twosuperior",
'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', "brokenbar",
'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', "minus",
'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', "multiply",
'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', "RS",
'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', "US",
'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', "space",
'hungarumlaut', 'ogonek', 'caron' "exclam",
] "quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"DEL",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nbspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
]

View File

@ -1,48 +1,258 @@
StandardEncoding = [ StandardEncoding = [
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', 'space', 'exclam', 'quotedbl', ".notdef",
'numbersign', 'dollar', 'percent', 'ampersand', ".notdef",
'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', ".notdef",
'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', ".notdef",
'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', ".notdef",
'colon', 'semicolon', 'less', 'equal', 'greater', ".notdef",
'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', ".notdef",
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', ".notdef",
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', ".notdef",
'bracketright', 'asciicircum', 'underscore', 'quoteleft', ".notdef",
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', ".notdef",
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', ".notdef",
'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', ".notdef",
'.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown', ".notdef",
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', ".notdef",
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', ".notdef",
'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef', ".notdef",
'endash', 'dagger', 'daggerdbl', 'periodcentered', ".notdef",
'.notdef', 'paragraph', 'bullet', 'quotesinglbase', ".notdef",
'quotedblbase', 'quotedblright', 'guillemotright', ".notdef",
'ellipsis', 'perthousand', '.notdef', 'questiondown', "space",
'.notdef', 'grave', 'acute', 'circumflex', 'tilde', "exclam",
'macron', 'breve', 'dotaccent', 'dieresis', '.notdef', "quotedbl",
'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek', "numbersign",
'caron', 'emdash', '.notdef', '.notdef', '.notdef', "dollar",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', "percent",
'.notdef', '.notdef', '.notdef', '.notdef', '.notdef', "ampersand",
'.notdef', '.notdef', '.notdef', 'AE', '.notdef', "quoteright",
'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef', "parenleft",
'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef', "parenright",
'.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef', "asterisk",
'.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef', "plus",
'lslash', 'oslash', 'oe', 'germandbls', '.notdef', "comma",
'.notdef', '.notdef', '.notdef' "hyphen",
] "period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"quoteleft",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"exclamdown",
"cent",
"sterling",
"fraction",
"yen",
"florin",
"section",
"currency",
"quotesingle",
"quotedblleft",
"guillemotleft",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
".notdef",
"endash",
"dagger",
"daggerdbl",
"periodcentered",
".notdef",
"paragraph",
"bullet",
"quotesinglbase",
"quotedblbase",
"quotedblright",
"guillemotright",
"ellipsis",
"perthousand",
".notdef",
"questiondown",
".notdef",
"grave",
"acute",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"dieresis",
".notdef",
"ring",
"cedilla",
".notdef",
"hungarumlaut",
"ogonek",
"caron",
"emdash",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"AE",
".notdef",
"ordfeminine",
".notdef",
".notdef",
".notdef",
".notdef",
"Lslash",
"Oslash",
"OE",
"ordmasculine",
".notdef",
".notdef",
".notdef",
".notdef",
".notdef",
"ae",
".notdef",
".notdef",
".notdef",
"dotlessi",
".notdef",
".notdef",
"lslash",
"oslash",
"oe",
"germandbls",
".notdef",
".notdef",
".notdef",
".notdef",
]

View File

@ -4,116 +4,132 @@ but missing from Python. See https://github.com/fonttools/fonttools/issues/236
import codecs import codecs
import encodings import encodings
class ExtendCodec(codecs.Codec): class ExtendCodec(codecs.Codec):
def __init__(self, name, base_encoding, mapping):
self.name = name
self.base_encoding = base_encoding
self.mapping = mapping
self.reverse = {v: k for k, v in mapping.items()}
self.max_len = max(len(v) for v in mapping.values())
self.info = codecs.CodecInfo(
name=self.name, encode=self.encode, decode=self.decode
)
codecs.register_error(name, self.error)
def __init__(self, name, base_encoding, mapping): def _map(self, mapper, output_type, exc_type, input, errors):
self.name = name base_error_handler = codecs.lookup_error(errors)
self.base_encoding = base_encoding length = len(input)
self.mapping = mapping out = output_type()
self.reverse = {v:k for k,v in mapping.items()} while input:
self.max_len = max(len(v) for v in mapping.values()) # first try to use self.error as the error handler
self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) try:
codecs.register_error(name, self.error) part = mapper(input, self.base_encoding, errors=self.name)
out += part
break # All converted
except exc_type as e:
# else convert the correct part, handle error as requested and continue
out += mapper(input[: e.start], self.base_encoding, self.name)
replacement, pos = base_error_handler(e)
out += replacement
input = input[pos:]
return out, length
def _map(self, mapper, output_type, exc_type, input, errors): def encode(self, input, errors="strict"):
base_error_handler = codecs.lookup_error(errors) return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
length = len(input)
out = output_type()
while input:
# first try to use self.error as the error handler
try:
part = mapper(input, self.base_encoding, errors=self.name)
out += part
break # All converted
except exc_type as e:
# else convert the correct part, handle error as requested and continue
out += mapper(input[:e.start], self.base_encoding, self.name)
replacement, pos = base_error_handler(e)
out += replacement
input = input[pos:]
return out, length
def encode(self, input, errors='strict'): def decode(self, input, errors="strict"):
return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors) return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
def decode(self, input, errors='strict'): def error(self, e):
return self._map(codecs.decode, str, UnicodeDecodeError, input, errors) if isinstance(e, UnicodeDecodeError):
for end in range(e.start + 1, e.end + 1):
def error(self, e): s = e.object[e.start : end]
if isinstance(e, UnicodeDecodeError): if s in self.mapping:
for end in range(e.start + 1, e.end + 1): return self.mapping[s], end
s = e.object[e.start:end] elif isinstance(e, UnicodeEncodeError):
if s in self.mapping: for end in range(e.start + 1, e.start + self.max_len + 1):
return self.mapping[s], end s = e.object[e.start : end]
elif isinstance(e, UnicodeEncodeError): if s in self.reverse:
for end in range(e.start + 1, e.start + self.max_len + 1): return self.reverse[s], end
s = e.object[e.start:end] e.encoding = self.name
if s in self.reverse: raise e
return self.reverse[s], end
e.encoding = self.name
raise e
_extended_encodings = { _extended_encodings = {
"x_mac_japanese_ttx": ("shift_jis", { "x_mac_japanese_ttx": (
b"\xFC": chr(0x007C), "shift_jis",
b"\x7E": chr(0x007E), {
b"\x80": chr(0x005C), b"\xFC": chr(0x007C),
b"\xA0": chr(0x00A0), b"\x7E": chr(0x007E),
b"\xFD": chr(0x00A9), b"\x80": chr(0x005C),
b"\xFE": chr(0x2122), b"\xA0": chr(0x00A0),
b"\xFF": chr(0x2026), b"\xFD": chr(0x00A9),
}), b"\xFE": chr(0x2122),
"x_mac_trad_chinese_ttx": ("big5", { b"\xFF": chr(0x2026),
b"\x80": chr(0x005C), },
b"\xA0": chr(0x00A0), ),
b"\xFD": chr(0x00A9), "x_mac_trad_chinese_ttx": (
b"\xFE": chr(0x2122), "big5",
b"\xFF": chr(0x2026), {
}), b"\x80": chr(0x005C),
"x_mac_korean_ttx": ("euc_kr", { b"\xA0": chr(0x00A0),
b"\x80": chr(0x00A0), b"\xFD": chr(0x00A9),
b"\x81": chr(0x20A9), b"\xFE": chr(0x2122),
b"\x82": chr(0x2014), b"\xFF": chr(0x2026),
b"\x83": chr(0x00A9), },
b"\xFE": chr(0x2122), ),
b"\xFF": chr(0x2026), "x_mac_korean_ttx": (
}), "euc_kr",
"x_mac_simp_chinese_ttx": ("gb2312", { {
b"\x80": chr(0x00FC), b"\x80": chr(0x00A0),
b"\xA0": chr(0x00A0), b"\x81": chr(0x20A9),
b"\xFD": chr(0x00A9), b"\x82": chr(0x2014),
b"\xFE": chr(0x2122), b"\x83": chr(0x00A9),
b"\xFF": chr(0x2026), b"\xFE": chr(0x2122),
}), b"\xFF": chr(0x2026),
},
),
"x_mac_simp_chinese_ttx": (
"gb2312",
{
b"\x80": chr(0x00FC),
b"\xA0": chr(0x00A0),
b"\xFD": chr(0x00A9),
b"\xFE": chr(0x2122),
b"\xFF": chr(0x2026),
},
),
} }
_cache = {} _cache = {}
def search_function(name):
name = encodings.normalize_encoding(name) # Rather undocumented...
if name in _extended_encodings:
if name not in _cache:
base_encoding, mapping = _extended_encodings[name]
assert(name[-4:] == "_ttx")
# Python 2 didn't have any of the encodings that we are implementing
# in this file. Python 3 added aliases for the East Asian ones, mapping
# them "temporarily" to the same base encoding as us, with a comment
# suggesting that full implementation will appear some time later.
# As such, try the Python version of the x_mac_... first, if that is found,
# use *that* as our base encoding. This would make our encoding upgrade
# to the full encoding when and if Python finally implements that.
# http://bugs.python.org/issue24041
base_encodings = [name[:-4], base_encoding]
for base_encoding in base_encodings:
try:
codecs.lookup(base_encoding)
except LookupError:
continue
_cache[name] = ExtendCodec(name, base_encoding, mapping)
break
return _cache[name].info
return None def search_function(name):
name = encodings.normalize_encoding(name) # Rather undocumented...
if name in _extended_encodings:
if name not in _cache:
base_encoding, mapping = _extended_encodings[name]
assert name[-4:] == "_ttx"
# Python 2 didn't have any of the encodings that we are implementing
# in this file. Python 3 added aliases for the East Asian ones, mapping
# them "temporarily" to the same base encoding as us, with a comment
# suggesting that full implementation will appear some time later.
# As such, try the Python version of the x_mac_... first, if that is found,
# use *that* as our base encoding. This would make our encoding upgrade
# to the full encoding when and if Python finally implements that.
# http://bugs.python.org/issue24041
base_encodings = [name[:-4], base_encoding]
for base_encoding in base_encodings:
try:
codecs.lookup(base_encoding)
except LookupError:
continue
_cache[name] = ExtendCodec(name, base_encoding, mapping)
break
return _cache[name].info
return None
codecs.register(search_function) codecs.register(search_function)
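Importing this module registers the x_mac_*_ttx codecs globally, after which the extended byte values round-trip through the error handler while everything else falls back to the base encoding. A small usage sketch (the byte/character pairs are taken from the mapping tables above, assuming they behave as described):

import fontTools.encodings.codecs  # noqa: F401  (import is enough: it registers the codecs)

assert b"\xFE".decode("x_mac_japanese_ttx") == "\u2122"   # TRADE MARK SIGN, from the extension table
assert "\u2122".encode("x_mac_japanese_ttx") == b"\xFE"
assert b"\x41".decode("x_mac_japanese_ttx") == "A"        # ordinary bytes use the shift_jis base encoding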

View File

@ -912,14 +912,11 @@ class IgnoreSubstStatement(Statement):
contexts = [] contexts = []
for prefix, glyphs, suffix in self.chainContexts: for prefix, glyphs, suffix in self.chainContexts:
res = "" res = ""
if len(prefix) or len(suffix): if len(prefix):
if len(prefix): res += " ".join(map(asFea, prefix)) + " "
res += " ".join(map(asFea, prefix)) + " " res += " ".join(g.asFea() + "'" for g in glyphs)
res += " ".join(g.asFea() + "'" for g in glyphs) if len(suffix):
if len(suffix): res += " " + " ".join(map(asFea, suffix))
res += " " + " ".join(map(asFea, suffix))
else:
res += " ".join(map(asFea, glyphs))
contexts.append(res) contexts.append(res)
return "ignore sub " + ", ".join(contexts) + ";" return "ignore sub " + ", ".join(contexts) + ";"

View File

@ -446,6 +446,7 @@ class Builder(object):
assert self.cv_parameters_ids_[tag] is not None assert self.cv_parameters_ids_[tag] is not None
nameID = self.cv_parameters_ids_[tag] nameID = self.cv_parameters_ids_[tag]
table.setName(string, nameID, platformID, platEncID, langID) table.setName(string, nameID, platformID, platEncID, langID)
table.names.sort()
def build_OS_2(self): def build_OS_2(self):
if not self.os2_: if not self.os2_:
@ -768,8 +769,8 @@ class Builder(object):
varidx_map = store.optimize() varidx_map = store.optimize()
gdef.remap_device_varidxes(varidx_map) gdef.remap_device_varidxes(varidx_map)
if 'GPOS' in self.font: if "GPOS" in self.font:
self.font['GPOS'].table.remap_device_varidxes(varidx_map) self.font["GPOS"].table.remap_device_varidxes(varidx_map)
VariableScalar.clear_cache() VariableScalar.clear_cache()
if any( if any(
( (
@ -1339,7 +1340,9 @@ class Builder(object):
# GSUB 5/6 # GSUB 5/6
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups): def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix): if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location) raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
lookup = self.get_lookup_(location, ChainContextSubstBuilder) lookup = self.get_lookup_(location, ChainContextSubstBuilder)
lookup.rules.append( lookup.rules.append(
ChainContextualRule( ChainContextualRule(
@ -1349,10 +1352,13 @@ class Builder(object):
def add_single_subst_chained_(self, location, prefix, suffix, mapping): def add_single_subst_chained_(self, location, prefix, suffix, mapping):
if not mapping or not all(prefix) or not all(suffix): if not mapping or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual substitution", location) raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
# https://github.com/fonttools/fonttools/issues/512 # https://github.com/fonttools/fonttools/issues/512
# https://github.com/fonttools/fonttools/issues/2150
chain = self.get_lookup_(location, ChainContextSubstBuilder) chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_single_subst(set(mapping.keys())) sub = chain.find_chainable_single_subst(mapping)
if sub is None: if sub is None:
sub = self.get_chained_lookup_(location, SingleSubstBuilder) sub = self.get_chained_lookup_(location, SingleSubstBuilder)
sub.mapping.update(mapping) sub.mapping.update(mapping)
@ -1377,8 +1383,12 @@ class Builder(object):
lookup = self.get_lookup_(location, SinglePosBuilder) lookup = self.get_lookup_(location, SinglePosBuilder)
for glyphs, value in pos: for glyphs, value in pos:
if not glyphs: if not glyphs:
raise FeatureLibError("Empty glyph class in positioning rule", location) raise FeatureLibError(
otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False) "Empty glyph class in positioning rule", location
)
otValueRecord = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
for glyph in glyphs: for glyph in glyphs:
try: try:
lookup.add_pos(location, glyph, otValueRecord) lookup.add_pos(location, glyph, otValueRecord)
@ -1388,9 +1398,7 @@ class Builder(object):
# GPOS 2 # GPOS 2
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2): def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
if not glyphclass1 or not glyphclass2: if not glyphclass1 or not glyphclass2:
raise FeatureLibError( raise FeatureLibError("Empty glyph class in positioning rule", location)
"Empty glyph class in positioning rule", location
)
lookup = self.get_lookup_(location, PairPosBuilder) lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
@ -1458,7 +1466,9 @@ class Builder(object):
# GPOS 7/8 # GPOS 7/8
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix): if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location) raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
lookup = self.get_lookup_(location, ChainContextPosBuilder) lookup = self.get_lookup_(location, ChainContextPosBuilder)
lookup.rules.append( lookup.rules.append(
ChainContextualRule( ChainContextualRule(
@ -1468,7 +1478,9 @@ class Builder(object):
def add_single_pos_chained_(self, location, prefix, suffix, pos): def add_single_pos_chained_(self, location, prefix, suffix, pos):
if not pos or not all(prefix) or not all(suffix): if not pos or not all(prefix) or not all(suffix):
raise FeatureLibError("Empty glyph class in contextual positioning rule", location) raise FeatureLibError(
"Empty glyph class in contextual positioning rule", location
)
# https://github.com/fonttools/fonttools/issues/514 # https://github.com/fonttools/fonttools/issues/514
chain = self.get_lookup_(location, ChainContextPosBuilder) chain = self.get_lookup_(location, ChainContextPosBuilder)
targets = [] targets = []
@ -1479,7 +1491,9 @@ class Builder(object):
if value is None: if value is None:
subs.append(None) subs.append(None)
continue continue
otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False) otValue = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
sub = chain.find_chainable_single_pos(targets, glyphs, otValue) sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
if sub is None: if sub is None:
sub = self.get_chained_lookup_(location, SinglePosBuilder) sub = self.get_chained_lookup_(location, SinglePosBuilder)
@ -1498,7 +1512,9 @@ class Builder(object):
for markClassDef in markClass.definitions: for markClassDef in markClass.definitions:
for mark in markClassDef.glyphs.glyphSet(): for mark in markClassDef.glyphs.glyphSet():
if mark not in lookupBuilder.marks: if mark not in lookupBuilder.marks:
otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor) otMarkAnchor = self.makeOpenTypeAnchor(
location, markClassDef.anchor
)
lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor) lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
else: else:
existingMarkClass = lookupBuilder.marks[mark][0] existingMarkClass = lookupBuilder.marks[mark][0]
@ -1591,11 +1607,15 @@ class Builder(object):
for dim in ("x", "y"): for dim in ("x", "y"):
if not isinstance(getattr(anchor, dim), VariableScalar): if not isinstance(getattr(anchor, dim), VariableScalar):
continue continue
if getattr(anchor, dim+"DeviceTable") is not None: if getattr(anchor, dim + "DeviceTable") is not None:
raise FeatureLibError("Can't define a device coordinate and variable scalar", location) raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder: if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location) raise FeatureLibError(
varscalar = getattr(anchor,dim) "Can't define a variable scalar in a non-variable font", location
)
varscalar = getattr(anchor, dim)
varscalar.axes = self.axes varscalar.axes = self.axes
default, index = varscalar.add_to_variation_store(self.varstorebuilder) default, index = varscalar.add_to_variation_store(self.varstorebuilder)
setattr(anchor, dim, default) setattr(anchor, dim, default)
@ -1606,7 +1626,9 @@ class Builder(object):
deviceY = buildVarDevTable(index) deviceY = buildVarDevTable(index)
variable = True variable = True
otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY) otlanchor = otl.buildAnchor(
anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY
)
if variable: if variable:
otlanchor.Format = 3 otlanchor.Format = 3
return otlanchor return otlanchor
@ -1617,7 +1639,6 @@ class Builder(object):
if not name.startswith("Reserved") if not name.startswith("Reserved")
} }
def makeOpenTypeValueRecord(self, location, v, pairPosContext): def makeOpenTypeValueRecord(self, location, v, pairPosContext):
"""ast.ValueRecord --> otBase.ValueRecord""" """ast.ValueRecord --> otBase.ValueRecord"""
if not v: if not v:
@ -1635,9 +1656,14 @@ class Builder(object):
otDeviceName = otName[0:4] + "Device" otDeviceName = otName[0:4] + "Device"
feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:] feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
if getattr(v, feaDeviceName): if getattr(v, feaDeviceName):
raise FeatureLibError("Can't define a device coordinate and variable scalar", location) raise FeatureLibError(
"Can't define a device coordinate and variable scalar", location
)
if not self.varstorebuilder: if not self.varstorebuilder:
raise FeatureLibError("Can't define a variable scalar in a non-variable font", location) raise FeatureLibError(
"Can't define a variable scalar in a non-variable font",
location,
)
val.axes = self.axes val.axes = self.axes
default, index = val.add_to_variation_store(self.varstorebuilder) default, index = val.add_to_variation_store(self.varstorebuilder)
vr[otName] = default vr[otName] = default

View File

@ -1,7 +1,8 @@
from typing import NamedTuple from typing import NamedTuple
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib" LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING" LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
class LookupDebugInfo(NamedTuple): class LookupDebugInfo(NamedTuple):
"""Information about where a lookup came from, to be embedded in a font""" """Information about where a lookup came from, to be embedded in a font"""

View File

@ -134,7 +134,8 @@ class Parser(object):
] ]
raise FeatureLibError( raise FeatureLibError(
"The following glyph names are referenced but are missing from the " "The following glyph names are referenced but are missing from the "
"glyph set:\n" + ("\n".join(error)), None "glyph set:\n" + ("\n".join(error)),
None,
) )
return self.doc_ return self.doc_
@ -396,7 +397,8 @@ class Parser(object):
self.expect_symbol_("-") self.expect_symbol_("-")
range_end = self.expect_cid_() range_end = self.expect_cid_()
self.check_glyph_name_in_glyph_set( self.check_glyph_name_in_glyph_set(
f"cid{range_start:05d}", f"cid{range_end:05d}", f"cid{range_start:05d}",
f"cid{range_end:05d}",
) )
glyphs.add_cid_range( glyphs.add_cid_range(
range_start, range_start,
@ -522,27 +524,33 @@ class Parser(object):
) )
return (prefix, glyphs, lookups, values, suffix, hasMarks) return (prefix, glyphs, lookups, values, suffix, hasMarks)
def parse_chain_context_(self): def parse_ignore_glyph_pattern_(self, sub):
location = self.cur_token_location_ location = self.cur_token_location_
prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
vertical=False vertical=False
) )
chainContext = [(prefix, glyphs, suffix)] if any(lookups):
hasLookups = any(lookups) raise FeatureLibError(
f'No lookups can be specified for "ignore {sub}"', location
)
if not hasMarks:
error = FeatureLibError(
f'Ambiguous "ignore {sub}", there should be at least one marked glyph',
location,
)
log.warning(str(error))
suffix, glyphs = glyphs[1:], glyphs[0:1]
chainContext = (prefix, glyphs, suffix)
return chainContext
def parse_ignore_context_(self, sub):
location = self.cur_token_location_
chainContext = [self.parse_ignore_glyph_pattern_(sub)]
while self.next_token_ == ",": while self.next_token_ == ",":
self.expect_symbol_(",") self.expect_symbol_(",")
( chainContext.append(self.parse_ignore_glyph_pattern_(sub))
prefix,
glyphs,
lookups,
values,
suffix,
hasMarks,
) = self.parse_glyph_pattern_(vertical=False)
chainContext.append((prefix, glyphs, suffix))
hasLookups = hasLookups or any(lookups)
self.expect_symbol_(";") self.expect_symbol_(";")
return chainContext, hasLookups return chainContext
def parse_ignore_(self): def parse_ignore_(self):
# Parses an ignore sub/pos rule. # Parses an ignore sub/pos rule.
@ -550,18 +558,10 @@ class Parser(object):
location = self.cur_token_location_ location = self.cur_token_location_
self.advance_lexer_() self.advance_lexer_()
if self.cur_token_ in ["substitute", "sub"]: if self.cur_token_ in ["substitute", "sub"]:
chainContext, hasLookups = self.parse_chain_context_() chainContext = self.parse_ignore_context_("sub")
if hasLookups:
raise FeatureLibError(
'No lookups can be specified for "ignore sub"', location
)
return self.ast.IgnoreSubstStatement(chainContext, location=location) return self.ast.IgnoreSubstStatement(chainContext, location=location)
if self.cur_token_ in ["position", "pos"]: if self.cur_token_ in ["position", "pos"]:
chainContext, hasLookups = self.parse_chain_context_() chainContext = self.parse_ignore_context_("pos")
if hasLookups:
raise FeatureLibError(
'No lookups can be specified for "ignore pos"', location
)
return self.ast.IgnorePosStatement(chainContext, location=location) return self.ast.IgnorePosStatement(chainContext, location=location)
raise FeatureLibError( raise FeatureLibError(
'Expected "substitute" or "position"', self.cur_token_location_ 'Expected "substitute" or "position"', self.cur_token_location_
@ -696,7 +696,9 @@ class Parser(object):
location = self.cur_token_location_ location = self.cur_token_location_
glyphs = self.parse_glyphclass_(accept_glyphname=True) glyphs = self.parse_glyphclass_(accept_glyphname=True)
if not glyphs.glyphSet(): if not glyphs.glyphSet():
raise FeatureLibError("Empty glyph class in mark class definition", location) raise FeatureLibError(
"Empty glyph class in mark class definition", location
)
anchor = self.parse_anchor_() anchor = self.parse_anchor_()
name = self.expect_class_name_() name = self.expect_class_name_()
self.expect_symbol_(";") self.expect_symbol_(";")
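For illustration, a minimal sketch of the feature-file syntax the reworked ignore parsing above accepts: comma-separated patterns, each with at least one marked glyph and no lookups (glyph names here are hypothetical):

from io import StringIO
from fontTools.feaLib.parser import Parser

# Two comma-separated "ignore sub" patterns, each with a marked glyph (b', e'),
# followed by the contextual rule they carve exceptions out of.
fea = """
feature calt {
    ignore sub a b' c, d e';
    sub a b' c by B;
} calt;
"""
doc = Parser(StringIO(fea), glyphNames={"a", "b", "c", "d", "e", "B"}).parse()
print(doc.asFea())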

View File

@ -800,7 +800,7 @@ class FontBuilder(object):
) )
self._initTableWithValues("DSIG", {}, values) self._initTableWithValues("DSIG", {}, values)
def addOpenTypeFeatures(self, features, filename=None, tables=None): def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False):
"""Add OpenType features to the font from a string containing """Add OpenType features to the font from a string containing
Feature File syntax. Feature File syntax.
@ -810,11 +810,14 @@ class FontBuilder(object):
The optional `tables` argument can be a list of OTL table tags to The optional `tables` argument can be a list of OTL table tags to
build, allowing the caller to only build selected OTL tables. See build, allowing the caller to only build selected OTL tables. See
`fontTools.feaLib` for details. `fontTools.feaLib` for details.
The optional `debug` argument controls whether to add source debugging
information to the font in the `Debg` table.
""" """
from .feaLib.builder import addOpenTypeFeaturesFromString from .feaLib.builder import addOpenTypeFeaturesFromString
addOpenTypeFeaturesFromString( addOpenTypeFeaturesFromString(
self.font, features, filename=filename, tables=tables self.font, features, filename=filename, tables=tables, debug=debug
) )
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"): def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
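For illustration, a minimal sketch of the new debug flag in use; the glyph set, outlines, and metrics below are placeholder values, not part of this change:

from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen

def _square(size=500):
    # Placeholder outline so the demo font compiles.
    pen = TTGlyphPen(None)
    pen.moveTo((0, 0))
    pen.lineTo((size, 0))
    pen.lineTo((size, size))
    pen.lineTo((0, size))
    pen.closePath()
    return pen.glyph()

names = [".notdef", "A", "B", "A_B"]
fb = FontBuilder(unitsPerEm=1000)
fb.setupGlyphOrder(names)
fb.setupCharacterMap({0x41: "A", 0x42: "B"})
fb.setupGlyf({name: _square() for name in names})
fb.setupHorizontalMetrics({name: (600, 50) for name in names})
fb.setupHorizontalHeader(ascent=800, descent=-200)
fb.setupNameTable({"familyName": "Demo", "styleName": "Regular"})
fb.setupOS2()
fb.setupPost()
# debug=True is forwarded to addOpenTypeFeaturesFromString, which records the
# feature source in the Debg table.
fb.addOpenTypeFeatures("feature liga { sub A B by A_B; } liga;", debug=True)
fb.save("Demo-Regular.ttf")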

View File

@ -4,7 +4,11 @@
from fontTools import ttLib from fontTools import ttLib
import fontTools.merge.base import fontTools.merge.base
from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings from fontTools.merge.cmap import (
computeMegaGlyphOrder,
computeMegaCmap,
renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options from fontTools.merge.options import Options
import fontTools.merge.tables import fontTools.merge.tables
@ -15,191 +19,193 @@ import logging
log = logging.getLogger("fontTools.merge") log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO) timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
class Merger(object): class Merger(object):
"""Font merger. """Font merger.
This class merges multiple files into a single OpenType font, taking into This class merges multiple files into a single OpenType font, taking into
account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
all the fonts). all the fonts).
If multiple glyphs map to the same Unicode value, and the glyphs are considered If multiple glyphs map to the same Unicode value, and the glyphs are considered
sufficiently different (that is, they differ in any of paths, widths, or sufficiently different (that is, they differ in any of paths, widths, or
height), then subsequent glyphs are renamed and a lookup in the ``locl`` height), then subsequent glyphs are renamed and a lookup in the ``locl``
feature will be created to disambiguate them. For example, if the arguments feature will be created to disambiguate them. For example, if the arguments
are an Arabic font and a Latin font and both contain a set of parentheses, are an Arabic font and a Latin font and both contain a set of parentheses,
the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``, the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
and a lookup will be inserted into the ``locl`` feature (creating it if and a lookup will be inserted into the ``locl`` feature (creating it if
necessary) under the ``latn`` script to substitute ``parenleft`` with necessary) under the ``latn`` script to substitute ``parenleft`` with
``parenleft#1`` etc. ``parenleft#1`` etc.
Restrictions: Restrictions:
- All fonts must have the same units per em. - All fonts must have the same units per em.
- If duplicate glyph disambiguation takes place as described above then the - If duplicate glyph disambiguation takes place as described above then the
fonts must have a ``GSUB`` table. fonts must have a ``GSUB`` table.
Attributes: Attributes:
options: Currently unused. options: Currently unused.
""" """
def __init__(self, options=None): def __init__(self, options=None):
if not options: if not options:
options = Options() options = Options()
self.options = options self.options = options
def _openFonts(self, fontfiles): def _openFonts(self, fontfiles):
fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
for font,fontfile in zip(fonts, fontfiles): for font, fontfile in zip(fonts, fontfiles):
font._merger__fontfile = fontfile font._merger__fontfile = fontfile
font._merger__name = font['name'].getDebugName(4) font._merger__name = font["name"].getDebugName(4)
return fonts return fonts
def merge(self, fontfiles): def merge(self, fontfiles):
"""Merges fonts together. """Merges fonts together.
Args: Args:
fontfiles: A list of file names to be merged fontfiles: A list of file names to be merged
Returns: Returns:
A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
this to write it out to an OTF file. this to write it out to an OTF file.
""" """
# #
# Settle on a mega glyph order. # Settle on a mega glyph order.
# #
fonts = self._openFonts(fontfiles) fonts = self._openFonts(fontfiles)
glyphOrders = [list(font.getGlyphOrder()) for font in fonts] glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
computeMegaGlyphOrder(self, glyphOrders) computeMegaGlyphOrder(self, glyphOrders)
# Take first input file sfntVersion # Take first input file sfntVersion
sfntVersion = fonts[0].sfntVersion sfntVersion = fonts[0].sfntVersion
# Reload fonts and set new glyph names on them. # Reload fonts and set new glyph names on them.
fonts = self._openFonts(fontfiles) fonts = self._openFonts(fontfiles)
for font,glyphOrder in zip(fonts, glyphOrders): for font, glyphOrder in zip(fonts, glyphOrders):
font.setGlyphOrder(glyphOrder) font.setGlyphOrder(glyphOrder)
if 'CFF ' in font: if "CFF " in font:
renameCFFCharStrings(self, glyphOrder, font['CFF ']) renameCFFCharStrings(self, glyphOrder, font["CFF "])
cmaps = [font['cmap'] for font in fonts] cmaps = [font["cmap"] for font in fonts]
self.duplicateGlyphsPerFont = [{} for _ in fonts] self.duplicateGlyphsPerFont = [{} for _ in fonts]
computeMegaCmap(self, cmaps) computeMegaCmap(self, cmaps)
mega = ttLib.TTFont(sfntVersion=sfntVersion) mega = ttLib.TTFont(sfntVersion=sfntVersion)
mega.setGlyphOrder(self.glyphOrder) mega.setGlyphOrder(self.glyphOrder)
for font in fonts: for font in fonts:
self._preMerge(font) self._preMerge(font)
self.fonts = fonts self.fonts = fonts
allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
allTags.remove('GlyphOrder') allTags.remove("GlyphOrder")
for tag in allTags: for tag in sorted(allTags):
if tag in self.options.drop_tables: if tag in self.options.drop_tables:
continue continue
with timer("merge '%s'" % tag): with timer("merge '%s'" % tag):
tables = [font.get(tag, NotImplemented) for font in fonts] tables = [font.get(tag, NotImplemented) for font in fonts]
log.info("Merging '%s'.", tag) log.info("Merging '%s'.", tag)
clazz = ttLib.getTableClass(tag) clazz = ttLib.getTableClass(tag)
table = clazz(tag).merge(self, tables) table = clazz(tag).merge(self, tables)
# XXX Clean this up and use: table = mergeObjects(tables) # XXX Clean this up and use: table = mergeObjects(tables)
if table is not NotImplemented and table is not False: if table is not NotImplemented and table is not False:
mega[tag] = table mega[tag] = table
log.info("Merged '%s'.", tag) log.info("Merged '%s'.", tag)
else: else:
log.info("Dropped '%s'.", tag) log.info("Dropped '%s'.", tag)
del self.duplicateGlyphsPerFont del self.duplicateGlyphsPerFont
del self.fonts del self.fonts
self._postMerge(mega) self._postMerge(mega)
return mega return mega
def mergeObjects(self, returnTable, logic, tables): def mergeObjects(self, returnTable, logic, tables):
# Right now we don't use self at all. Will use in the future # Right now we don't use self at all. Will use in the future
# for options and logging. # for options and logging.
allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) allKeys = set.union(
for key in allKeys: set(),
try: *(vars(table).keys() for table in tables if table is not NotImplemented),
mergeLogic = logic[key] )
except KeyError: for key in allKeys:
try: try:
mergeLogic = logic['*'] mergeLogic = logic[key]
except KeyError: except KeyError:
raise Exception("Don't know how to merge key %s of class %s" % try:
(key, returnTable.__class__.__name__)) mergeLogic = logic["*"]
if mergeLogic is NotImplemented: except KeyError:
continue raise Exception(
value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) "Don't know how to merge key %s of class %s"
if value is not NotImplemented: % (key, returnTable.__class__.__name__)
setattr(returnTable, key, value) )
if mergeLogic is NotImplemented:
continue
value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
if value is not NotImplemented:
setattr(returnTable, key, value)
return returnTable return returnTable
def _preMerge(self, font): def _preMerge(self, font):
layoutPreMerge(font) layoutPreMerge(font)
def _postMerge(self, font): def _postMerge(self, font):
layoutPostMerge(font) layoutPostMerge(font)
if "OS/2" in font: if "OS/2" in font:
# https://github.com/fonttools/fonttools/issues/2538 # https://github.com/fonttools/fonttools/issues/2538
# TODO: Add an option to disable this? # TODO: Add an option to disable this?
font["OS/2"].recalcAvgCharWidth(font) font["OS/2"].recalcAvgCharWidth(font)
__all__ = [ __all__ = ["Options", "Merger", "main"]
'Options',
'Merger',
'main'
]
@timer("make one with everything (TOTAL TIME)") @timer("make one with everything (TOTAL TIME)")
def main(args=None): def main(args=None):
"""Merge multiple fonts into one""" """Merge multiple fonts into one"""
from fontTools import configLogger from fontTools import configLogger
if args is None: if args is None:
args = sys.argv[1:] args = sys.argv[1:]
options = Options() options = Options()
args = options.parse_opts(args, ignore_unknown=['output-file']) args = options.parse_opts(args, ignore_unknown=["output-file"])
outfile = 'merged.ttf' outfile = "merged.ttf"
fontfiles = [] fontfiles = []
for g in args: for g in args:
if g.startswith('--output-file='): if g.startswith("--output-file="):
outfile = g[14:] outfile = g[14:]
continue continue
fontfiles.append(g) fontfiles.append(g)
if len(args) < 1: if len(args) < 1:
print("usage: pyftmerge font...", file=sys.stderr) print("usage: pyftmerge font...", file=sys.stderr)
return 1 return 1
configLogger(level=logging.INFO if options.verbose else logging.WARNING) configLogger(level=logging.INFO if options.verbose else logging.WARNING)
if options.timing: if options.timing:
timer.logger.setLevel(logging.DEBUG) timer.logger.setLevel(logging.DEBUG)
else: else:
timer.logger.disabled = True timer.logger.disabled = True
merger = Merger(options=options) merger = Merger(options=options)
font = merger.merge(fontfiles) font = merger.merge(fontfiles)
with timer("compile and save font"): with timer("compile and save font"):
font.save(outfile) font.save(outfile)
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())
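For illustration, a minimal sketch of the same merge driven through the API instead of the pyftmerge command line (input paths hypothetical; the fonts must share units per em):

from fontTools.merge import Merger, Options

options = Options(drop_tables=["DSIG"], verbose=True)
merger = Merger(options=options)
font = merger.merge(["Latin-Regular.ttf", "Arabic-Regular.ttf"])
font.save("merged.ttf")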

View File

@ -2,5 +2,5 @@ import sys
from fontTools.merge import main from fontTools.merge import main
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(main()) sys.exit(main())

View File

@ -10,67 +10,72 @@ log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs): def add_method(*clazzes, **kwargs):
"""Returns a decorator function that adds a new method to one or """Returns a decorator function that adds a new method to one or
more classes.""" more classes."""
allowDefault = kwargs.get('allowDefaultTable', False) allowDefault = kwargs.get("allowDefaultTable", False)
def wrapper(method):
done = [] def wrapper(method):
for clazz in clazzes: done = []
if clazz in done: continue # Support multiple names of a clazz for clazz in clazzes:
done.append(clazz) if clazz in done:
assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' continue # Support multiple names of a clazz
assert method.__name__ not in clazz.__dict__, \ done.append(clazz)
"Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
setattr(clazz, method.__name__, method) assert (
return None method.__name__ not in clazz.__dict__
return wrapper ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def mergeObjects(lst): def mergeObjects(lst):
lst = [item for item in lst if item is not NotImplemented] lst = [item for item in lst if item is not NotImplemented]
if not lst: if not lst:
return NotImplemented return NotImplemented
lst = [item for item in lst if item is not None] lst = [item for item in lst if item is not None]
if not lst: if not lst:
return None return None
clazz = lst[0].__class__ clazz = lst[0].__class__
assert all(type(item) == clazz for item in lst), lst assert all(type(item) == clazz for item in lst), lst
logic = clazz.mergeMap logic = clazz.mergeMap
returnTable = clazz() returnTable = clazz()
returnDict = {} returnDict = {}
allKeys = set.union(set(), *(vars(table).keys() for table in lst)) allKeys = set.union(set(), *(vars(table).keys() for table in lst))
for key in allKeys: for key in allKeys:
try: try:
mergeLogic = logic[key] mergeLogic = logic[key]
except KeyError: except KeyError:
try: try:
mergeLogic = logic['*'] mergeLogic = logic["*"]
except KeyError: except KeyError:
raise Exception("Don't know how to merge key %s of class %s" % raise Exception(
(key, clazz.__name__)) "Don't know how to merge key %s of class %s" % (key, clazz.__name__)
if mergeLogic is NotImplemented: )
continue if mergeLogic is NotImplemented:
value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) continue
if value is not NotImplemented: value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
returnDict[key] = value if value is not NotImplemented:
returnDict[key] = value
returnTable.__dict__ = returnDict returnTable.__dict__ = returnDict
return returnTable
return returnTable
@add_method(DefaultTable, allowDefaultTable=True) @add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables): def merge(self, m, tables):
if not hasattr(self, 'mergeMap'): if not hasattr(self, "mergeMap"):
log.info("Don't know how to merge '%s'.", self.tableTag) log.info("Don't know how to merge '%s'.", self.tableTag)
return NotImplemented return NotImplemented
logic = self.mergeMap
if isinstance(logic, dict):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)
logic = self.mergeMap
if isinstance(logic, dict):
return m.mergeObjects(self, self.mergeMap, tables)
else:
return logic(tables)
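As a rough sketch of the convention these helpers implement (simplified, not the fontTools API itself): a table's mergeMap maps attribute names to reducer functions, with "*" as the fallback, and each reducer is applied across the per-font values of that attribute.

def merge_attributes(objects, merge_map):
    # Simplified: gather every attribute seen on any input object and reduce
    # its values with the per-key function, falling back to "*".
    merged = {}
    all_keys = set().union(*(vars(obj).keys() for obj in objects))
    for key in all_keys:
        logic = merge_map.get(key, merge_map.get("*"))
        if logic is None or logic is NotImplemented:
            continue
        values = [getattr(obj, key, NotImplemented) for obj in objects]
        result = logic(values)
        if result is not NotImplemented:
            merged[key] = result
    return merged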

View File

@ -11,119 +11,131 @@ log = logging.getLogger("fontTools.merge")
def computeMegaGlyphOrder(merger, glyphOrders): def computeMegaGlyphOrder(merger, glyphOrders):
"""Modifies passed-in glyphOrders to reflect new glyph names. """Modifies passed-in glyphOrders to reflect new glyph names.
Stores merger.glyphOrder.""" Stores merger.glyphOrder."""
megaOrder = {} megaOrder = {}
for glyphOrder in glyphOrders: for glyphOrder in glyphOrders:
for i,glyphName in enumerate(glyphOrder): for i, glyphName in enumerate(glyphOrder):
if glyphName in megaOrder: if glyphName in megaOrder:
n = megaOrder[glyphName] n = megaOrder[glyphName]
while (glyphName + "." + repr(n)) in megaOrder: while (glyphName + "." + repr(n)) in megaOrder:
n += 1 n += 1
megaOrder[glyphName] = n megaOrder[glyphName] = n
glyphName += "." + repr(n) glyphName += "." + repr(n)
glyphOrder[i] = glyphName glyphOrder[i] = glyphName
megaOrder[glyphName] = 1 megaOrder[glyphName] = 1
merger.glyphOrder = megaOrder = list(megaOrder.keys()) merger.glyphOrder = megaOrder = list(megaOrder.keys())
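For illustration, a small sketch of the renaming this produces (glyph names hypothetical); duplicate names from later fonts gain a ".1", ".2", ... suffix and the passed-in orders are modified in place:

from types import SimpleNamespace
from fontTools.merge.cmap import computeMegaGlyphOrder

merger = SimpleNamespace()  # stand-in object; the function only sets .glyphOrder
orders = [[".notdef", "A", "B"], [".notdef", "A", "C"]]
computeMegaGlyphOrder(merger, orders)
assert orders[1] == [".notdef.1", "A.1", "C"]
assert merger.glyphOrder == [".notdef", "A", "B", ".notdef.1", "A.1", "C"]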
def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2, def _glyphsAreSame(
advanceTolerance=.05, glyphSet1,
advanceToleranceEmpty=.20): glyphSet2,
pen1 = DecomposingRecordingPen(glyphSet1) glyph1,
pen2 = DecomposingRecordingPen(glyphSet2) glyph2,
g1 = glyphSet1[glyph1] advanceTolerance=0.05,
g2 = glyphSet2[glyph2] advanceToleranceEmpty=0.20,
g1.draw(pen1) ):
g2.draw(pen2) pen1 = DecomposingRecordingPen(glyphSet1)
if pen1.value != pen2.value: pen2 = DecomposingRecordingPen(glyphSet2)
return False g1 = glyphSet1[glyph1]
# Allow more width tolerance for glyphs with no ink g2 = glyphSet2[glyph2]
tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty g1.draw(pen1)
g2.draw(pen2)
if pen1.value != pen2.value:
return False
# Allow more width tolerance for glyphs with no ink
tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty
# TODO Warn if advances not the same but within tolerance. # TODO Warn if advances not the same but within tolerance.
if abs(g1.width - g2.width) > g1.width * tolerance: if abs(g1.width - g2.width) > g1.width * tolerance:
return False return False
if hasattr(g1, 'height') and g1.height is not None: if hasattr(g1, "height") and g1.height is not None:
if abs(g1.height - g2.height) > g1.height * tolerance: if abs(g1.height - g2.height) > g1.height * tolerance:
return False return False
return True return True
# Valid (format, platformID, platEncID) triplets for cmap subtables containing # Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics. # Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings": # Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name # https://docs.microsoft.com/en-us/typography/opentype/spec/name
class _CmapUnicodePlatEncodings: class _CmapUnicodePlatEncodings:
BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)} BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)} FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
def computeMegaCmap(merger, cmapTables): def computeMegaCmap(merger, cmapTables):
"""Sets merger.cmap and merger.glyphOrder.""" """Sets merger.cmap and merger.glyphOrder."""
# TODO Handle format=14. # TODO Handle format=14.
# Only merge format 4 and 12 Unicode subtables, ignores all other subtables # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
# If there is a format 12 table for a font, ignore the format 4 table of it # If there is a format 12 table for a font, ignore the format 4 table of it
chosenCmapTables = [] chosenCmapTables = []
for fontIdx,table in enumerate(cmapTables): for fontIdx, table in enumerate(cmapTables):
format4 = None format4 = None
format12 = None format12 = None
for subtable in table.tables: for subtable in table.tables:
properties = (subtable.format, subtable.platformID, subtable.platEncID) properties = (subtable.format, subtable.platformID, subtable.platEncID)
if properties in _CmapUnicodePlatEncodings.BMP: if properties in _CmapUnicodePlatEncodings.BMP:
format4 = subtable format4 = subtable
elif properties in _CmapUnicodePlatEncodings.FullRepertoire: elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
format12 = subtable format12 = subtable
else: else:
log.warning( log.warning(
"Dropped cmap subtable from font '%s':\t" "Dropped cmap subtable from font '%s':\t"
"format %2s, platformID %2s, platEncID %2s", "format %2s, platformID %2s, platEncID %2s",
fontIdx, subtable.format, subtable.platformID, subtable.platEncID fontIdx,
) subtable.format,
if format12 is not None: subtable.platformID,
chosenCmapTables.append((format12, fontIdx)) subtable.platEncID,
elif format4 is not None: )
chosenCmapTables.append((format4, fontIdx)) if format12 is not None:
chosenCmapTables.append((format12, fontIdx))
elif format4 is not None:
chosenCmapTables.append((format4, fontIdx))
# Build the unicode mapping # Build the unicode mapping
merger.cmap = cmap = {} merger.cmap = cmap = {}
fontIndexForGlyph = {} fontIndexForGlyph = {}
glyphSets = [None for f in merger.fonts] if hasattr(merger, 'fonts') else None glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None
for table,fontIdx in chosenCmapTables: for table, fontIdx in chosenCmapTables:
# handle duplicates # handle duplicates
for uni,gid in table.cmap.items(): for uni, gid in table.cmap.items():
oldgid = cmap.get(uni, None) oldgid = cmap.get(uni, None)
if oldgid is None: if oldgid is None:
cmap[uni] = gid cmap[uni] = gid
fontIndexForGlyph[gid] = fontIdx fontIndexForGlyph[gid] = fontIdx
elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE
continue continue
elif oldgid != gid: elif oldgid != gid:
# Char previously mapped to oldgid, now to gid. # Char previously mapped to oldgid, now to gid.
# Record, to fix up in GSUB 'locl' later. # Record, to fix up in GSUB 'locl' later.
if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None: if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
if glyphSets is not None: if glyphSets is not None:
oldFontIdx = fontIndexForGlyph[oldgid] oldFontIdx = fontIndexForGlyph[oldgid]
for idx in (fontIdx, oldFontIdx): for idx in (fontIdx, oldFontIdx):
if glyphSets[idx] is None: if glyphSets[idx] is None:
glyphSets[idx] = merger.fonts[idx].getGlyphSet() glyphSets[idx] = merger.fonts[idx].getGlyphSet()
#if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid): # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
# continue # continue
merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid: elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
# Char previously mapped to oldgid but oldgid is already remapped to a different # Char previously mapped to oldgid but oldgid is already remapped to a different
# gid, because of another Unicode character. # gid, because of another Unicode character.
# TODO: Try harder to do something about these. # TODO: Try harder to do something about these.
log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid) log.warning(
"Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
)
def renameCFFCharStrings(merger, glyphOrder, cffTable): def renameCFFCharStrings(merger, glyphOrder, cffTable):
"""Rename topDictIndex charStrings based on glyphOrder.""" """Rename topDictIndex charStrings based on glyphOrder."""
td = cffTable.cff.topDictIndex[0] td = cffTable.cff.topDictIndex[0]
charStrings = {} charStrings = {}
for i, v in enumerate(td.CharStrings.charStrings.values()): for i, v in enumerate(td.CharStrings.charStrings.values()):
glyphName = glyphOrder[i] glyphName = glyphOrder[i]
charStrings[glyphName] = v charStrings[glyphName] = v
td.CharStrings.charStrings = charStrings td.CharStrings.charStrings = charStrings
td.charset = list(glyphOrder) td.charset = list(glyphOrder)

View File

@ -14,453 +14,516 @@ log = logging.getLogger("fontTools.merge")
def mergeLookupLists(lst): def mergeLookupLists(lst):
# TODO Do smarter merge. # TODO Do smarter merge.
return sumLists(lst) return sumLists(lst)
def mergeFeatures(lst): def mergeFeatures(lst):
assert lst assert lst
self = otTables.Feature() self = otTables.Feature()
self.FeatureParams = None self.FeatureParams = None
self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex]) self.LookupListIndex = mergeLookupLists(
self.LookupCount = len(self.LookupListIndex) [l.LookupListIndex for l in lst if l.LookupListIndex]
return self )
self.LookupCount = len(self.LookupListIndex)
return self
def mergeFeatureLists(lst): def mergeFeatureLists(lst):
d = {} d = {}
for l in lst: for l in lst:
for f in l: for f in l:
tag = f.FeatureTag tag = f.FeatureTag
if tag not in d: if tag not in d:
d[tag] = [] d[tag] = []
d[tag].append(f.Feature) d[tag].append(f.Feature)
ret = [] ret = []
for tag in sorted(d.keys()): for tag in sorted(d.keys()):
rec = otTables.FeatureRecord() rec = otTables.FeatureRecord()
rec.FeatureTag = tag rec.FeatureTag = tag
rec.Feature = mergeFeatures(d[tag]) rec.Feature = mergeFeatures(d[tag])
ret.append(rec) ret.append(rec)
return ret return ret
def mergeLangSyses(lst): def mergeLangSyses(lst):
assert lst assert lst
# TODO Support merging ReqFeatureIndex # TODO Support merging ReqFeatureIndex
assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
self = otTables.LangSys()
self.LookupOrder = None
self.ReqFeatureIndex = 0xFFFF
self.FeatureIndex = mergeFeatureLists(
[l.FeatureIndex for l in lst if l.FeatureIndex]
)
self.FeatureCount = len(self.FeatureIndex)
return self
self = otTables.LangSys()
self.LookupOrder = None
self.ReqFeatureIndex = 0xFFFF
self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
self.FeatureCount = len(self.FeatureIndex)
return self
def mergeScripts(lst): def mergeScripts(lst):
assert lst assert lst
if len(lst) == 1: if len(lst) == 1:
return lst[0] return lst[0]
langSyses = {} langSyses = {}
for sr in lst: for sr in lst:
for lsr in sr.LangSysRecord: for lsr in sr.LangSysRecord:
if lsr.LangSysTag not in langSyses: if lsr.LangSysTag not in langSyses:
langSyses[lsr.LangSysTag] = [] langSyses[lsr.LangSysTag] = []
langSyses[lsr.LangSysTag].append(lsr.LangSys) langSyses[lsr.LangSysTag].append(lsr.LangSys)
lsrecords = [] lsrecords = []
for tag, langSys_list in sorted(langSyses.items()): for tag, langSys_list in sorted(langSyses.items()):
lsr = otTables.LangSysRecord() lsr = otTables.LangSysRecord()
lsr.LangSys = mergeLangSyses(langSys_list) lsr.LangSys = mergeLangSyses(langSys_list)
lsr.LangSysTag = tag lsr.LangSysTag = tag
lsrecords.append(lsr) lsrecords.append(lsr)
self = otTables.Script()
self.LangSysRecord = lsrecords
self.LangSysCount = len(lsrecords)
dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
if dfltLangSyses:
self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
else:
self.DefaultLangSys = None
return self
self = otTables.Script()
self.LangSysRecord = lsrecords
self.LangSysCount = len(lsrecords)
dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
if dfltLangSyses:
self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
else:
self.DefaultLangSys = None
return self
def mergeScriptRecords(lst): def mergeScriptRecords(lst):
d = {} d = {}
for l in lst: for l in lst:
for s in l: for s in l:
tag = s.ScriptTag tag = s.ScriptTag
if tag not in d: if tag not in d:
d[tag] = [] d[tag] = []
d[tag].append(s.Script) d[tag].append(s.Script)
ret = [] ret = []
for tag in sorted(d.keys()): for tag in sorted(d.keys()):
rec = otTables.ScriptRecord() rec = otTables.ScriptRecord()
rec.ScriptTag = tag rec.ScriptTag = tag
rec.Script = mergeScripts(d[tag]) rec.Script = mergeScripts(d[tag])
ret.append(rec) ret.append(rec)
return ret return ret
otTables.ScriptList.mergeMap = { otTables.ScriptList.mergeMap = {
'ScriptCount': lambda lst: None, # TODO "ScriptCount": lambda lst: None, # TODO
'ScriptRecord': mergeScriptRecords, "ScriptRecord": mergeScriptRecords,
} }
otTables.BaseScriptList.mergeMap = { otTables.BaseScriptList.mergeMap = {
'BaseScriptCount': lambda lst: None, # TODO "BaseScriptCount": lambda lst: None, # TODO
# TODO: Merge duplicate entries # TODO: Merge duplicate entries
'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), "BaseScriptRecord": lambda lst: sorted(
sumLists(lst), key=lambda s: s.BaseScriptTag
),
} }
otTables.FeatureList.mergeMap = { otTables.FeatureList.mergeMap = {
'FeatureCount': sum, "FeatureCount": sum,
'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
} }
otTables.LookupList.mergeMap = { otTables.LookupList.mergeMap = {
'LookupCount': sum, "LookupCount": sum,
'Lookup': sumLists, "Lookup": sumLists,
} }
otTables.Coverage.mergeMap = { otTables.Coverage.mergeMap = {
'Format': min, "Format": min,
'glyphs': sumLists, "glyphs": sumLists,
} }
otTables.ClassDef.mergeMap = { otTables.ClassDef.mergeMap = {
'Format': min, "Format": min,
'classDefs': sumDicts, "classDefs": sumDicts,
} }
otTables.LigCaretList.mergeMap = { otTables.LigCaretList.mergeMap = {
'Coverage': mergeObjects, "Coverage": mergeObjects,
'LigGlyphCount': sum, "LigGlyphCount": sum,
'LigGlyph': sumLists, "LigGlyph": sumLists,
} }
otTables.AttachList.mergeMap = { otTables.AttachList.mergeMap = {
'Coverage': mergeObjects, "Coverage": mergeObjects,
'GlyphCount': sum, "GlyphCount": sum,
'AttachPoint': sumLists, "AttachPoint": sumLists,
} }
# XXX Renumber MarkFilterSets of lookups # XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = { otTables.MarkGlyphSetsDef.mergeMap = {
'MarkSetTableFormat': equal, "MarkSetTableFormat": equal,
'MarkSetCount': sum, "MarkSetCount": sum,
'Coverage': sumLists, "Coverage": sumLists,
} }
otTables.Axis.mergeMap = { otTables.Axis.mergeMap = {
'*': mergeObjects, "*": mergeObjects,
} }
# XXX Fix BASE table merging # XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = { otTables.BaseTagList.mergeMap = {
'BaseTagCount': sum, "BaseTagCount": sum,
'BaselineTag': sumLists, "BaselineTag": sumLists,
} }
otTables.GDEF.mergeMap = \ otTables.GDEF.mergeMap = (
otTables.GSUB.mergeMap = \ otTables.GSUB.mergeMap
otTables.GPOS.mergeMap = \ ) = (
otTables.BASE.mergeMap = \ otTables.GPOS.mergeMap
otTables.JSTF.mergeMap = \ ) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
otTables.MATH.mergeMap = \ "*": mergeObjects,
{ "Version": max,
'*': mergeObjects,
'Version': max,
} }
ttLib.getTableClass('GDEF').mergeMap = \ ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass(
ttLib.getTableClass('GSUB').mergeMap = \ "GSUB"
ttLib.getTableClass('GPOS').mergeMap = \ ).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass(
ttLib.getTableClass('BASE').mergeMap = \ "BASE"
ttLib.getTableClass('JSTF').mergeMap = \ ).mergeMap = ttLib.getTableClass(
ttLib.getTableClass('MATH').mergeMap = \ "JSTF"
{ ).mergeMap = ttLib.getTableClass(
'tableTag': onlyExisting(equal), # XXX clean me up "MATH"
'table': mergeObjects, ).mergeMap = {
"tableTag": onlyExisting(equal), # XXX clean me up
"table": mergeObjects,
} }
@add_method(ttLib.getTableClass('GSUB'))
@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables): def merge(self, m, tables):
assert len(tables) == len(m.duplicateGlyphsPerFont) assert len(tables) == len(m.duplicateGlyphsPerFont)
for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
if not dups: continue if not dups:
if table is None or table is NotImplemented: continue
log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups) if table is None or table is NotImplemented:
continue log.warning(
"Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
m.fonts[i]._merger__name,
dups,
)
continue
synthFeature = None synthFeature = None
synthLookup = None synthLookup = None
for script in table.table.ScriptList.ScriptRecord: for script in table.table.ScriptList.ScriptRecord:
if script.ScriptTag == 'DFLT': continue # XXX if script.ScriptTag == "DFLT":
for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: continue # XXX
if langsys is None: continue # XXX Create! for langsys in [script.Script.DefaultLangSys] + [
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl'] l.LangSys for l in script.Script.LangSysRecord
assert len(feature) <= 1 ]:
if feature: if langsys is None:
feature = feature[0] continue # XXX Create!
else: feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
if not synthFeature: assert len(feature) <= 1
synthFeature = otTables.FeatureRecord() if feature:
synthFeature.FeatureTag = 'locl' feature = feature[0]
f = synthFeature.Feature = otTables.Feature() else:
f.FeatureParams = None if not synthFeature:
f.LookupCount = 0 synthFeature = otTables.FeatureRecord()
f.LookupListIndex = [] synthFeature.FeatureTag = "locl"
table.table.FeatureList.FeatureRecord.append(synthFeature) f = synthFeature.Feature = otTables.Feature()
table.table.FeatureList.FeatureCount += 1 f.FeatureParams = None
feature = synthFeature f.LookupCount = 0
langsys.FeatureIndex.append(feature) f.LookupListIndex = []
langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) table.table.FeatureList.FeatureRecord.append(synthFeature)
table.table.FeatureList.FeatureCount += 1
feature = synthFeature
langsys.FeatureIndex.append(feature)
langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
if not synthLookup: if not synthLookup:
subtable = otTables.SingleSubst() subtable = otTables.SingleSubst()
subtable.mapping = dups subtable.mapping = dups
synthLookup = otTables.Lookup() synthLookup = otTables.Lookup()
synthLookup.LookupFlag = 0 synthLookup.LookupFlag = 0
synthLookup.LookupType = 1 synthLookup.LookupType = 1
synthLookup.SubTableCount = 1 synthLookup.SubTableCount = 1
synthLookup.SubTable = [subtable] synthLookup.SubTable = [subtable]
if table.table.LookupList is None: if table.table.LookupList is None:
# mtiLib uses None as default value for LookupList, # mtiLib uses None as default value for LookupList,
# while feaLib points to an empty array with count 0 # while feaLib points to an empty array with count 0
# TODO: make them do the same # TODO: make them do the same
table.table.LookupList = otTables.LookupList() table.table.LookupList = otTables.LookupList()
table.table.LookupList.Lookup = [] table.table.LookupList.Lookup = []
table.table.LookupList.LookupCount = 0 table.table.LookupList.LookupCount = 0
table.table.LookupList.Lookup.append(synthLookup) table.table.LookupList.Lookup.append(synthLookup)
table.table.LookupList.LookupCount += 1 table.table.LookupList.LookupCount += 1
if feature.Feature.LookupListIndex[:1] != [synthLookup]: if feature.Feature.LookupListIndex[:1] != [synthLookup]:
feature.Feature.LookupListIndex[:0] = [synthLookup] feature.Feature.LookupListIndex[:0] = [synthLookup]
feature.Feature.LookupCount += 1 feature.Feature.LookupCount += 1
DefaultTable.merge(self, m, tables) DefaultTable.merge(self, m, tables)
return self return self
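For illustration, the shape of the lookup synthesized above, built in isolation (the duplicate mapping is hypothetical):

from fontTools.ttLib.tables import otTables

dups = {"parenleft": "parenleft#1"}  # old glyph name -> renamed duplicate

subtable = otTables.SingleSubst()
subtable.mapping = dups
lookup = otTables.Lookup()
lookup.LookupFlag = 0
lookup.LookupType = 1
lookup.SubTableCount = 1
lookup.SubTable = [subtable]
# The merger prepends such a lookup to a 'locl' feature under every non-DFLT
# script of the font that contributed the duplicates.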
@add_method(otTables.SingleSubst,
otTables.MultipleSubst, @add_method(
otTables.AlternateSubst, otTables.SingleSubst,
otTables.LigatureSubst, otTables.MultipleSubst,
otTables.ReverseChainSingleSubst, otTables.AlternateSubst,
otTables.SinglePos, otTables.LigatureSubst,
otTables.PairPos, otTables.ReverseChainSingleSubst,
otTables.CursivePos, otTables.SinglePos,
otTables.MarkBasePos, otTables.PairPos,
otTables.MarkLigPos, otTables.CursivePos,
otTables.MarkMarkPos) otTables.MarkBasePos,
otTables.MarkLigPos,
otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
pass pass
# Copied and trimmed down from subset.py # Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst, @add_method(
otTables.ChainContextSubst, otTables.ContextSubst,
otTables.ContextPos, otTables.ChainContextSubst,
otTables.ChainContextPos) otTables.ContextPos,
otTables.ChainContextPos,
)
def __merge_classify_context(self): def __merge_classify_context(self):
class ContextHelper(object):
def __init__(self, klass, Format):
if klass.__name__.endswith("Subst"):
Typ = "Sub"
Type = "Subst"
else:
Typ = "Pos"
Type = "Pos"
if klass.__name__.startswith("Chain"):
Chain = "Chain"
else:
Chain = ""
ChainTyp = Chain + Typ
class ContextHelper(object): self.Typ = Typ
def __init__(self, klass, Format): self.Type = Type
if klass.__name__.endswith('Subst'): self.Chain = Chain
Typ = 'Sub' self.ChainTyp = ChainTyp
Type = 'Subst'
else:
Typ = 'Pos'
Type = 'Pos'
if klass.__name__.startswith('Chain'):
Chain = 'Chain'
else:
Chain = ''
ChainTyp = Chain+Typ
self.Typ = Typ self.LookupRecord = Type + "LookupRecord"
self.Type = Type
self.Chain = Chain
self.ChainTyp = ChainTyp
self.LookupRecord = Type+'LookupRecord' if Format == 1:
self.Rule = ChainTyp + "Rule"
self.RuleSet = ChainTyp + "RuleSet"
elif Format == 2:
self.Rule = ChainTyp + "ClassRule"
self.RuleSet = ChainTyp + "ClassSet"
if Format == 1: if self.Format not in [1, 2, 3]:
self.Rule = ChainTyp+'Rule' return None # Don't shoot the messenger; let it go
self.RuleSet = ChainTyp+'RuleSet' if not hasattr(self.__class__, "_merge__ContextHelpers"):
elif Format == 2: self.__class__._merge__ContextHelpers = {}
self.Rule = ChainTyp+'ClassRule' if self.Format not in self.__class__._merge__ContextHelpers:
self.RuleSet = ChainTyp+'ClassSet' helper = ContextHelper(self.__class__, self.Format)
self.__class__._merge__ContextHelpers[self.Format] = helper
if self.Format not in [1, 2, 3]: return self.__class__._merge__ContextHelpers[self.Format]
return None # Don't shoot the messenger; let it go
if not hasattr(self.__class__, "_merge__ContextHelpers"):
self.__class__._merge__ContextHelpers = {}
if self.Format not in self.__class__._merge__ContextHelpers:
helper = ContextHelper(self.__class__, self.Format)
self.__class__._merge__ContextHelpers[self.Format] = helper
return self.__class__._merge__ContextHelpers[self.Format]
@add_method(otTables.ContextSubst, @add_method(
otTables.ChainContextSubst, otTables.ContextSubst,
otTables.ContextPos, otTables.ChainContextSubst,
otTables.ChainContextPos) otTables.ContextPos,
otTables.ChainContextPos,
)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
c = self.__merge_classify_context() c = self.__merge_classify_context()
if self.Format in [1, 2]: if self.Format in [1, 2]:
for rs in getattr(self, c.RuleSet): for rs in getattr(self, c.RuleSet):
if not rs: continue if not rs:
for r in getattr(rs, c.Rule): continue
if not r: continue for r in getattr(rs, c.Rule):
for ll in getattr(r, c.LookupRecord): if not r:
if not ll: continue continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex] for ll in getattr(r, c.LookupRecord):
elif self.Format == 3: if not ll:
for ll in getattr(self, c.LookupRecord): continue
if not ll: continue ll.LookupListIndex = lookupMap[ll.LookupListIndex]
ll.LookupListIndex = lookupMap[ll.LookupListIndex] elif self.Format == 3:
else: for ll in getattr(self, c.LookupRecord):
assert 0, "unknown format: %s" % self.Format if not ll:
continue
ll.LookupListIndex = lookupMap[ll.LookupListIndex]
else:
assert 0, "unknown format: %s" % self.Format
@add_method(otTables.ExtensionSubst,
otTables.ExtensionPos) @add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
if self.Format == 1: if self.Format == 1:
self.ExtSubTable.mapLookups(lookupMap) self.ExtSubTable.mapLookups(lookupMap)
else: else:
assert 0, "unknown format: %s" % self.Format assert 0, "unknown format: %s" % self.Format
@add_method(otTables.Lookup) @add_method(otTables.Lookup)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
for st in self.SubTable: for st in self.SubTable:
if not st: continue if not st:
st.mapLookups(lookupMap) continue
st.mapLookups(lookupMap)
@add_method(otTables.LookupList) @add_method(otTables.LookupList)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
for l in self.Lookup: for l in self.Lookup:
if not l: continue if not l:
l.mapLookups(lookupMap) continue
l.mapLookups(lookupMap)
@add_method(otTables.Lookup) @add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap): def mapMarkFilteringSets(self, markFilteringSetMap):
if self.LookupFlag & 0x0010: if self.LookupFlag & 0x0010:
self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet] self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
@add_method(otTables.LookupList) @add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap): def mapMarkFilteringSets(self, markFilteringSetMap):
for l in self.Lookup: for l in self.Lookup:
if not l: continue if not l:
l.mapMarkFilteringSets(markFilteringSetMap) continue
l.mapMarkFilteringSets(markFilteringSetMap)
@add_method(otTables.Feature) @add_method(otTables.Feature)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]
@add_method(otTables.FeatureList) @add_method(otTables.FeatureList)
def mapLookups(self, lookupMap): def mapLookups(self, lookupMap):
for f in self.FeatureRecord: for f in self.FeatureRecord:
if not f or not f.Feature: continue if not f or not f.Feature:
f.Feature.mapLookups(lookupMap) continue
f.Feature.mapLookups(lookupMap)
@add_method(otTables.DefaultLangSys,
otTables.LangSys) @add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap): def mapFeatures(self, featureMap):
self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
if self.ReqFeatureIndex != 65535: if self.ReqFeatureIndex != 65535:
self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
@add_method(otTables.Script) @add_method(otTables.Script)
def mapFeatures(self, featureMap): def mapFeatures(self, featureMap):
if self.DefaultLangSys: if self.DefaultLangSys:
self.DefaultLangSys.mapFeatures(featureMap) self.DefaultLangSys.mapFeatures(featureMap)
for l in self.LangSysRecord: for l in self.LangSysRecord:
if not l or not l.LangSys: continue if not l or not l.LangSys:
l.LangSys.mapFeatures(featureMap) continue
l.LangSys.mapFeatures(featureMap)
@add_method(otTables.ScriptList) @add_method(otTables.ScriptList)
def mapFeatures(self, featureMap): def mapFeatures(self, featureMap):
for s in self.ScriptRecord: for s in self.ScriptRecord:
if not s or not s.Script: continue if not s or not s.Script:
s.Script.mapFeatures(featureMap) continue
s.Script.mapFeatures(featureMap)
def layoutPreMerge(font): def layoutPreMerge(font):
# Map indices to references # Map indices to references
GDEF = font.get('GDEF') GDEF = font.get("GDEF")
GSUB = font.get('GSUB') GSUB = font.get("GSUB")
GPOS = font.get('GPOS') GPOS = font.get("GPOS")
for t in [GSUB, GPOS]: for t in [GSUB, GPOS]:
if not t: continue if not t:
continue
if t.table.LookupList: if t.table.LookupList:
lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)} lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
t.table.LookupList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap)
t.table.FeatureList.mapLookups(lookupMap) t.table.FeatureList.mapLookups(lookupMap)
if GDEF and GDEF.table.Version >= 0x00010002: if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)} markFilteringSetMap = {
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
}
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
if t.table.FeatureList and t.table.ScriptList: if t.table.FeatureList and t.table.ScriptList:
featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)} featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
t.table.ScriptList.mapFeatures(featureMap) t.table.ScriptList.mapFeatures(featureMap)
# TODO FeatureParams nameIDs
# TODO FeatureParams nameIDs
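A rough sketch of the round trip layoutPreMerge and layoutPostMerge perform (plain Python, not the fontTools API): indices become object references before the per-font lists are concatenated, and references are mapped back to indices afterwards.

# Pre-merge: indices -> references, so concatenating lookup lists from
# several fonts cannot invalidate what a feature points to.
lookups = ["liga_lookup", "kern_lookup"]
feature_indices = [1, 0]
feature_refs = [lookups[i] for i in feature_indices]

# Post-merge: references -> indices into the merged (possibly reordered) list.
merged_lookups = ["kern_lookup", "liga_lookup", "mark_lookup"]
feature_indices = [merged_lookups.index(ref) for ref in feature_refs]
assert feature_indices == [0, 1]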
def layoutPostMerge(font): def layoutPostMerge(font):
# Map references back to indices # Map references back to indices
GDEF = font.get('GDEF') GDEF = font.get("GDEF")
GSUB = font.get('GSUB') GSUB = font.get("GSUB")
GPOS = font.get('GPOS') GPOS = font.get("GPOS")
for t in [GSUB, GPOS]: for t in [GSUB, GPOS]:
if not t: continue if not t:
continue
if t.table.FeatureList and t.table.ScriptList: if t.table.FeatureList and t.table.ScriptList:
# Collect unregistered (new) features. # Collect unregistered (new) features.
featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord) featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap) t.table.ScriptList.mapFeatures(featureMap)
# Record used features. # Record used features.
featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord) featureMap = AttendanceRecordingIdentityDict(
t.table.ScriptList.mapFeatures(featureMap) t.table.FeatureList.FeatureRecord
usedIndices = featureMap.s )
t.table.ScriptList.mapFeatures(featureMap)
usedIndices = featureMap.s
# Remove unused features # Remove unused features
t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices] t.table.FeatureList.FeatureRecord = [
f
for i, f in enumerate(t.table.FeatureList.FeatureRecord)
if i in usedIndices
]
# Map back to indices. # Map back to indices.
featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord) featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
t.table.ScriptList.mapFeatures(featureMap) t.table.ScriptList.mapFeatures(featureMap)
t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
if t.table.LookupList: if t.table.LookupList:
# Collect unregistered (new) lookups. # Collect unregistered (new) lookups.
lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup) lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap) t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap)
# Record used lookups. # Record used lookups.
lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap) t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap)
usedIndices = lookupMap.s usedIndices = lookupMap.s
# Remove unused lookups # Remove unused lookups
t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices] t.table.LookupList.Lookup = [
l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
]
# Map back to indices. # Map back to indices.
lookupMap = NonhashableDict(t.table.LookupList.Lookup) lookupMap = NonhashableDict(t.table.LookupList.Lookup)
t.table.FeatureList.mapLookups(lookupMap) t.table.FeatureList.mapLookups(lookupMap)
t.table.LookupList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap)
t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
if GDEF and GDEF.table.Version >= 0x00010002: if GDEF and GDEF.table.Version >= 0x00010002:
markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage) markFilteringSetMap = NonhashableDict(
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) GDEF.table.MarkGlyphSetsDef.Coverage
)
t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)
# TODO FeatureParams nameIDs # TODO FeatureParams nameIDs

View File

@ -4,82 +4,80 @@
class Options(object): class Options(object):
class UnknownOptionError(Exception):
pass
class UnknownOptionError(Exception): def __init__(self, **kwargs):
pass
def __init__(self, **kwargs): self.verbose = False
self.timing = False
self.drop_tables = []
self.verbose = False self.set(**kwargs)
self.timing = False
self.drop_tables = []
self.set(**kwargs) def set(self, **kwargs):
for k, v in kwargs.items():
if not hasattr(self, k):
raise self.UnknownOptionError("Unknown option '%s'" % k)
setattr(self, k, v)
def set(self, **kwargs): def parse_opts(self, argv, ignore_unknown=[]):
for k,v in kwargs.items(): ret = []
if not hasattr(self, k): opts = {}
raise self.UnknownOptionError("Unknown option '%s'" % k) for a in argv:
setattr(self, k, v) orig_a = a
if not a.startswith("--"):
ret.append(a)
continue
a = a[2:]
i = a.find("=")
op = "="
if i == -1:
if a.startswith("no-"):
k = a[3:]
v = False
else:
k = a
v = True
else:
k = a[:i]
if k[-1] in "-+":
op = k[-1] + "=" # Ops is '-=' or '+=' now.
k = k[:-1]
v = a[i + 1 :]
ok = k
k = k.replace("-", "_")
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
continue
else:
raise self.UnknownOptionError("Unknown option '%s'" % a)
def parse_opts(self, argv, ignore_unknown=[]): ov = getattr(self, k)
ret = [] if isinstance(ov, bool):
opts = {} v = bool(v)
for a in argv: elif isinstance(ov, int):
orig_a = a v = int(v)
if not a.startswith('--'): elif isinstance(ov, list):
ret.append(a) vv = v.split(",")
continue if vv == [""]:
a = a[2:] vv = []
i = a.find('=') vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
op = '=' if op == "=":
if i == -1: v = vv
if a.startswith("no-"): elif op == "+=":
k = a[3:] v = ov
v = False v.extend(vv)
else: elif op == "-=":
k = a v = ov
v = True for x in vv:
else: if x in v:
k = a[:i] v.remove(x)
if k[-1] in "-+": else:
op = k[-1]+'=' # Ops is '-=' or '+=' now. assert 0
k = k[:-1]
v = a[i+1:]
ok = k
k = k.replace('-', '_')
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
continue
else:
raise self.UnknownOptionError("Unknown option '%s'" % a)
ov = getattr(self, k) opts[k] = v
if isinstance(ov, bool): self.set(**opts)
v = bool(v)
elif isinstance(ov, int):
v = int(v)
elif isinstance(ov, list):
vv = v.split(',')
if vv == ['']:
vv = []
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
if op == '=':
v = vv
elif op == '+=':
v = ov
v.extend(vv)
elif op == '-=':
v = ov
for x in vv:
if x in v:
v.remove(x)
else:
assert 0
opts[k] = v
self.set(**opts)
return ret
return ret
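
As a quick illustration of the parsing rules above (boolean "--no-…" flags, "+=" / "-=" list edits, pass-through of everything else), here is a minimal sketch; the fontTools.merge.options import path is an assumption based on this refactor:

    from fontTools.merge.options import Options  # import path assumed

    opts = Options(drop_tables=["DSIG"])
    rest = opts.parse_opts(["--no-verbose", "--drop-tables+=EBDT,EBLC", "A.ttf", "B.ttf"])
    assert opts.verbose is False                         # "--no-verbose" cleared the flag
    assert opts.drop_tables == ["DSIG", "EBDT", "EBLC"]  # "+=" extended the list
    assert rest == ["A.ttf", "B.ttf"]                    # non-options are handed back
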

View File

@ -13,299 +13,307 @@ import logging
log = logging.getLogger("fontTools.merge") log = logging.getLogger("fontTools.merge")
ttLib.getTableClass('maxp').mergeMap = { ttLib.getTableClass("maxp").mergeMap = {
'*': max, "*": max,
'tableTag': equal, "tableTag": equal,
'tableVersion': equal, "tableVersion": equal,
'numGlyphs': sum, "numGlyphs": sum,
'maxStorage': first, "maxStorage": first,
'maxFunctionDefs': first, "maxFunctionDefs": first,
'maxInstructionDefs': first, "maxInstructionDefs": first,
# TODO When we correctly merge hinting data, update these values: # TODO When we correctly merge hinting data, update these values:
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
} }
headFlagsMergeBitMap = { headFlagsMergeBitMap = {
'size': 16, "size": 16,
'*': bitwise_or, "*": bitwise_or,
1: bitwise_and, # Baseline at y = 0 1: bitwise_and, # Baseline at y = 0
2: bitwise_and, # lsb at x = 0 2: bitwise_and, # lsb at x = 0
3: bitwise_and, # Force ppem to integer values. FIXME? 3: bitwise_and, # Force ppem to integer values. FIXME?
5: bitwise_and, # Font is vertical 5: bitwise_and, # Font is vertical
6: lambda bit: 0, # Always set to zero 6: lambda bit: 0, # Always set to zero
11: bitwise_and, # Font data is 'lossless' 11: bitwise_and, # Font data is 'lossless'
13: bitwise_and, # Optimized for ClearType 13: bitwise_and, # Optimized for ClearType
14: bitwise_and, # Last resort font. FIXME? equal or first may be better 14: bitwise_and, # Last resort font. FIXME? equal or first may be better
15: lambda bit: 0, # Always set to zero 15: lambda bit: 0, # Always set to zero
} }
ttLib.getTableClass('head').mergeMap = { ttLib.getTableClass("head").mergeMap = {
'tableTag': equal, "tableTag": equal,
'tableVersion': max, "tableVersion": max,
'fontRevision': max, "fontRevision": max,
'checkSumAdjustment': lambda lst: 0, # We need *something* here "checkSumAdjustment": lambda lst: 0, # We need *something* here
'magicNumber': equal, "magicNumber": equal,
'flags': mergeBits(headFlagsMergeBitMap), "flags": mergeBits(headFlagsMergeBitMap),
'unitsPerEm': equal, "unitsPerEm": equal,
'created': current_time, "created": current_time,
'modified': current_time, "modified": current_time,
'xMin': min, "xMin": min,
'yMin': min, "yMin": min,
'xMax': max, "xMax": max,
'yMax': max, "yMax": max,
'macStyle': first, "macStyle": first,
'lowestRecPPEM': max, "lowestRecPPEM": max,
'fontDirectionHint': lambda lst: 2, "fontDirectionHint": lambda lst: 2,
'indexToLocFormat': first, "indexToLocFormat": first,
'glyphDataFormat': equal, "glyphDataFormat": equal,
} }
ttLib.getTableClass('hhea').mergeMap = { ttLib.getTableClass("hhea").mergeMap = {
'*': equal, "*": equal,
'tableTag': equal, "tableTag": equal,
'tableVersion': max, "tableVersion": max,
'ascent': max, "ascent": max,
'descent': min, "descent": min,
'lineGap': max, "lineGap": max,
'advanceWidthMax': max, "advanceWidthMax": max,
'minLeftSideBearing': min, "minLeftSideBearing": min,
'minRightSideBearing': min, "minRightSideBearing": min,
'xMaxExtent': max, "xMaxExtent": max,
'caretSlopeRise': first, "caretSlopeRise": first,
'caretSlopeRun': first, "caretSlopeRun": first,
'caretOffset': first, "caretOffset": first,
'numberOfHMetrics': recalculate, "numberOfHMetrics": recalculate,
} }
ttLib.getTableClass('vhea').mergeMap = { ttLib.getTableClass("vhea").mergeMap = {
'*': equal, "*": equal,
'tableTag': equal, "tableTag": equal,
'tableVersion': max, "tableVersion": max,
'ascent': max, "ascent": max,
'descent': min, "descent": min,
'lineGap': max, "lineGap": max,
'advanceHeightMax': max, "advanceHeightMax": max,
'minTopSideBearing': min, "minTopSideBearing": min,
'minBottomSideBearing': min, "minBottomSideBearing": min,
'yMaxExtent': max, "yMaxExtent": max,
'caretSlopeRise': first, "caretSlopeRise": first,
'caretSlopeRun': first, "caretSlopeRun": first,
'caretOffset': first, "caretOffset": first,
'numberOfVMetrics': recalculate, "numberOfVMetrics": recalculate,
} }
os2FsTypeMergeBitMap = { os2FsTypeMergeBitMap = {
'size': 16, "size": 16,
'*': lambda bit: 0, "*": lambda bit: 0,
1: bitwise_or, # no embedding permitted 1: bitwise_or, # no embedding permitted
2: bitwise_and, # allow previewing and printing documents 2: bitwise_and, # allow previewing and printing documents
3: bitwise_and, # allow editing documents 3: bitwise_and, # allow editing documents
8: bitwise_or, # no subsetting permitted 8: bitwise_or, # no subsetting permitted
9: bitwise_or, # no embedding of outlines permitted 9: bitwise_or, # no embedding of outlines permitted
} }
def mergeOs2FsType(lst): def mergeOs2FsType(lst):
lst = list(lst) lst = list(lst)
if all(item == 0 for item in lst): if all(item == 0 for item in lst):
return 0 return 0
# Compute least restrictive logic for each fsType value # Compute least restrictive logic for each fsType value
for i in range(len(lst)): for i in range(len(lst)):
# unset bit 1 (no embedding permitted) if either bit 2 or 3 is set # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
if lst[i] & 0x000C: if lst[i] & 0x000C:
lst[i] &= ~0x0002 lst[i] &= ~0x0002
# set bit 2 (allow previewing) if bit 3 is set (allow editing) # set bit 2 (allow previewing) if bit 3 is set (allow editing)
elif lst[i] & 0x0008: elif lst[i] & 0x0008:
lst[i] |= 0x0004 lst[i] |= 0x0004
# set bits 2 and 3 if everything is allowed # set bits 2 and 3 if everything is allowed
elif lst[i] == 0: elif lst[i] == 0:
lst[i] = 0x000C lst[i] = 0x000C
fsType = mergeBits(os2FsTypeMergeBitMap)(lst) fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
# unset bits 2 and 3 if bit 1 is set (some font is "no embedding") # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
if fsType & 0x0002: if fsType & 0x0002:
fsType &= ~0x000C fsType &= ~0x000C
return fsType return fsType
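
A small sketch of the least-restrictive-common-subset behaviour described above; mergeOs2FsType is assumed to be importable from fontTools.merge.tables (path assumed):

    from fontTools.merge.tables import mergeOs2FsType  # import path assumed

    # preview/print only (bit 2) merged with preview+edit (bits 2 and 3):
    assert mergeOs2FsType([0x0004, 0x000C]) == 0x0004
    # an installable font (0) merged with a restricted one (bit 1) stays restricted:
    assert mergeOs2FsType([0x0000, 0x0002]) == 0x0002
    # all inputs installable -> merged font installable:
    assert mergeOs2FsType([0, 0, 0]) == 0
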
ttLib.getTableClass('OS/2').mergeMap = { ttLib.getTableClass("OS/2").mergeMap = {
'*': first, "*": first,
'tableTag': equal, "tableTag": equal,
'version': max, "version": max,
'xAvgCharWidth': first, # Will be recalculated at the end on the merged font "xAvgCharWidth": first, # Will be recalculated at the end on the merged font
'fsType': mergeOs2FsType, # Will be overwritten "fsType": mergeOs2FsType, # Will be overwritten
'panose': first, # FIXME: should really be the first Latin font "panose": first, # FIXME: should really be the first Latin font
'ulUnicodeRange1': bitwise_or, "ulUnicodeRange1": bitwise_or,
'ulUnicodeRange2': bitwise_or, "ulUnicodeRange2": bitwise_or,
'ulUnicodeRange3': bitwise_or, "ulUnicodeRange3": bitwise_or,
'ulUnicodeRange4': bitwise_or, "ulUnicodeRange4": bitwise_or,
'fsFirstCharIndex': min, "fsFirstCharIndex": min,
'fsLastCharIndex': max, "fsLastCharIndex": max,
'sTypoAscender': max, "sTypoAscender": max,
'sTypoDescender': min, "sTypoDescender": min,
'sTypoLineGap': max, "sTypoLineGap": max,
'usWinAscent': max, "usWinAscent": max,
'usWinDescent': max, "usWinDescent": max,
# Version 1 # Version 1
'ulCodePageRange1': onlyExisting(bitwise_or), "ulCodePageRange1": onlyExisting(bitwise_or),
'ulCodePageRange2': onlyExisting(bitwise_or), "ulCodePageRange2": onlyExisting(bitwise_or),
# Version 2, 3, 4 # Version 2, 3, 4
'sxHeight': onlyExisting(max), "sxHeight": onlyExisting(max),
'sCapHeight': onlyExisting(max), "sCapHeight": onlyExisting(max),
'usDefaultChar': onlyExisting(first), "usDefaultChar": onlyExisting(first),
'usBreakChar': onlyExisting(first), "usBreakChar": onlyExisting(first),
'usMaxContext': onlyExisting(max), "usMaxContext": onlyExisting(max),
# version 5 # version 5
'usLowerOpticalPointSize': onlyExisting(min), "usLowerOpticalPointSize": onlyExisting(min),
'usUpperOpticalPointSize': onlyExisting(max), "usUpperOpticalPointSize": onlyExisting(max),
} }
@add_method(ttLib.getTableClass('OS/2'))
@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables): def merge(self, m, tables):
DefaultTable.merge(self, m, tables) DefaultTable.merge(self, m, tables)
if self.version < 2: if self.version < 2:
# bits 8 and 9 are reserved and should be set to zero # bits 8 and 9 are reserved and should be set to zero
self.fsType &= ~0x0300 self.fsType &= ~0x0300
if self.version >= 3: if self.version >= 3:
# Only one of bits 1, 2, and 3 may be set. We already take # Only one of bits 1, 2, and 3 may be set. We already take
# care of bit 1 implications in mergeOs2FsType. So unset # care of bit 1 implications in mergeOs2FsType. So unset
# bit 2 if bit 3 is already set. # bit 2 if bit 3 is already set.
if self.fsType & 0x0008: if self.fsType & 0x0008:
self.fsType &= ~0x0004 self.fsType &= ~0x0004
return self return self
ttLib.getTableClass('post').mergeMap = {
'*': first, ttLib.getTableClass("post").mergeMap = {
'tableTag': equal, "*": first,
'formatType': max, "tableTag": equal,
'isFixedPitch': min, "formatType": max,
'minMemType42': max, "isFixedPitch": min,
'maxMemType42': lambda lst: 0, "minMemType42": max,
'minMemType1': max, "maxMemType42": lambda lst: 0,
'maxMemType1': lambda lst: 0, "minMemType1": max,
'mapping': onlyExisting(sumDicts), "maxMemType1": lambda lst: 0,
'extraNames': lambda lst: [], "mapping": onlyExisting(sumDicts),
"extraNames": lambda lst: [],
} }
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
'tableTag': equal, "tableTag": equal,
'metrics': sumDicts, "metrics": sumDicts,
} }
ttLib.getTableClass('name').mergeMap = { ttLib.getTableClass("name").mergeMap = {
'tableTag': equal, "tableTag": equal,
'names': first, # FIXME? Does mixing name records make sense? "names": first, # FIXME? Does mixing name records make sense?
} }
ttLib.getTableClass('loca').mergeMap = { ttLib.getTableClass("loca").mergeMap = {
'*': recalculate, "*": recalculate,
'tableTag': equal, "tableTag": equal,
} }
ttLib.getTableClass('glyf').mergeMap = { ttLib.getTableClass("glyf").mergeMap = {
'tableTag': equal, "tableTag": equal,
'glyphs': sumDicts, "glyphs": sumDicts,
'glyphOrder': sumLists, "glyphOrder": sumLists,
"axisTags": equal,
} }
@add_method(ttLib.getTableClass('glyf'))
@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables): def merge(self, m, tables):
for i,table in enumerate(tables): for i, table in enumerate(tables):
for g in table.glyphs.values(): for g in table.glyphs.values():
if i: if i:
# Drop hints for all but first font, since # Drop hints for all but first font, since
# we don't map functions / CVT values. # we don't map functions / CVT values.
g.removeHinting() g.removeHinting()
# Expand composite glyphs to load their # Expand composite glyphs to load their
# composite glyph names. # composite glyph names.
if g.isComposite(): if g.isComposite() or g.isVarComposite():
g.expand(table) g.expand(table)
return DefaultTable.merge(self, m, tables) return DefaultTable.merge(self, m, tables)
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass('CFF ')) ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
lst
) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables): def merge(self, m, tables):
if any(hasattr(table, "FDSelect") for table in tables): if any(hasattr(table, "FDSelect") for table in tables):
raise NotImplementedError( raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")
"Merging CID-keyed CFF tables is not supported yet"
)
for table in tables: for table in tables:
table.cff.desubroutinize() table.cff.desubroutinize()
newcff = tables[0] newcff = tables[0]
newfont = newcff.cff[0] newfont = newcff.cff[0]
private = newfont.Private private = newfont.Private
storedNamesStrings = [] storedNamesStrings = []
glyphOrderStrings = [] glyphOrderStrings = []
glyphOrder = set(newfont.getGlyphOrder()) glyphOrder = set(newfont.getGlyphOrder())
for name in newfont.strings.strings: for name in newfont.strings.strings:
if name not in glyphOrder: if name not in glyphOrder:
storedNamesStrings.append(name) storedNamesStrings.append(name)
else: else:
glyphOrderStrings.append(name) glyphOrderStrings.append(name)
chrset = list(newfont.charset) chrset = list(newfont.charset)
newcs = newfont.CharStrings newcs = newfont.CharStrings
log.debug("FONT 0 CharStrings: %d.", len(newcs)) log.debug("FONT 0 CharStrings: %d.", len(newcs))
for i, table in enumerate(tables[1:], start=1): for i, table in enumerate(tables[1:], start=1):
font = table.cff[0] font = table.cff[0]
font.Private = private font.Private = private
fontGlyphOrder = set(font.getGlyphOrder()) fontGlyphOrder = set(font.getGlyphOrder())
for name in font.strings.strings: for name in font.strings.strings:
if name in fontGlyphOrder: if name in fontGlyphOrder:
glyphOrderStrings.append(name) glyphOrderStrings.append(name)
cs = font.CharStrings cs = font.CharStrings
gs = table.cff.GlobalSubrs gs = table.cff.GlobalSubrs
log.debug("Font %d CharStrings: %d.", i, len(cs)) log.debug("Font %d CharStrings: %d.", i, len(cs))
chrset.extend(font.charset) chrset.extend(font.charset)
if newcs.charStringsAreIndexed: if newcs.charStringsAreIndexed:
for i, name in enumerate(cs.charStrings, start=len(newcs)): for i, name in enumerate(cs.charStrings, start=len(newcs)):
newcs.charStrings[name] = i newcs.charStrings[name] = i
newcs.charStringsIndex.items.append(None) newcs.charStringsIndex.items.append(None)
for name in cs.charStrings: for name in cs.charStrings:
newcs[name] = cs[name] newcs[name] = cs[name]
newfont.charset = chrset newfont.charset = chrset
newfont.numGlyphs = len(chrset) newfont.numGlyphs = len(chrset)
newfont.strings.strings = glyphOrderStrings + storedNamesStrings newfont.strings.strings = glyphOrderStrings + storedNamesStrings
return newcff return newcff
@add_method(ttLib.getTableClass('cmap'))
@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables): def merge(self, m, tables):
# TODO Handle format=14. # TODO Handle format=14.
if not hasattr(m, 'cmap'): if not hasattr(m, "cmap"):
computeMegaCmap(m, tables) computeMegaCmap(m, tables)
cmap = m.cmap cmap = m.cmap
cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF} cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
self.tables = [] self.tables = []
module = ttLib.getTableModule('cmap') module = ttLib.getTableModule("cmap")
if len(cmapBmpOnly) != len(cmap): if len(cmapBmpOnly) != len(cmap):
# format-12 required. # format-12 required.
cmapTable = module.cmap_classes[12](12) cmapTable = module.cmap_classes[12](12)
cmapTable.platformID = 3 cmapTable.platformID = 3
cmapTable.platEncID = 10 cmapTable.platEncID = 10
cmapTable.language = 0 cmapTable.language = 0
cmapTable.cmap = cmap cmapTable.cmap = cmap
self.tables.append(cmapTable) self.tables.append(cmapTable)
# always create format-4 # always create format-4
cmapTable = module.cmap_classes[4](4) cmapTable = module.cmap_classes[4](4)
cmapTable.platformID = 3 cmapTable.platformID = 3
cmapTable.platEncID = 1 cmapTable.platEncID = 1
cmapTable.language = 0 cmapTable.language = 0
cmapTable.cmap = cmapBmpOnly cmapTable.cmap = cmapBmpOnly
# ordered by platform then encoding # ordered by platform then encoding
self.tables.insert(0, cmapTable) self.tables.insert(0, cmapTable)
self.tableVersion = 0 self.tableVersion = 0
self.numSubTables = len(self.tables) self.numSubTables = len(self.tables)
return self return self

View File

@ -1,65 +1,78 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved. # Copyright 2021 Behdad Esfahbod. All Rights Reserved.
def is_Default_Ignorable(u): def is_Default_Ignorable(u):
# http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
# #
# TODO Move me to unicodedata module and autogenerate. # TODO Move me to unicodedata module and autogenerate.
# #
# Unicode 14.0: # Unicode 14.0:
# $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/' # $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
# 00AD # Cf SOFT HYPHEN # 00AD # Cf SOFT HYPHEN
# 034F # Mn COMBINING GRAPHEME JOINER # 034F # Mn COMBINING GRAPHEME JOINER
# 061C # Cf ARABIC LETTER MARK # 061C # Cf ARABIC LETTER MARK
# 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER # 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
# 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA # 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
# 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE # 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
# 180E # Cf MONGOLIAN VOWEL SEPARATOR # 180E # Cf MONGOLIAN VOWEL SEPARATOR
# 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR # 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
# 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK # 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
# 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE # 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
# 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS # 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
# 2065 # Cn <reserved-2065> # 2065 # Cn <reserved-2065>
# 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES # 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
# 3164 # Lo HANGUL FILLER # 3164 # Lo HANGUL FILLER
# FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 # FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
# FEFF # Cf ZERO WIDTH NO-BREAK SPACE # FEFF # Cf ZERO WIDTH NO-BREAK SPACE
# FFA0 # Lo HALFWIDTH HANGUL FILLER # FFA0 # Lo HALFWIDTH HANGUL FILLER
# FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8> # FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
# 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP # 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
# 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE # 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
# E0000 # Cn <reserved-E0000> # E0000 # Cn <reserved-E0000>
# E0001 # Cf LANGUAGE TAG # E0001 # Cf LANGUAGE TAG
# E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F> # E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
# E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG # E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
# E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF> # E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
# E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 # E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
# E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF> # E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
return ( return (
u == 0x00AD or # Cf SOFT HYPHEN u == 0x00AD
u == 0x034F or # Mn COMBINING GRAPHEME JOINER or u == 0x034F # Cf SOFT HYPHEN
u == 0x061C or # Cf ARABIC LETTER MARK or u == 0x061C # Mn COMBINING GRAPHEME JOINER
0x115F <= u <= 0x1160 or # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER or 0x115F <= u <= 0x1160 # Cf ARABIC LETTER MARK
0x17B4 <= u <= 0x17B5 or # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA or 0x17B4 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
0x180B <= u <= 0x180D or # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE <= u
u == 0x180E or # Cf MONGOLIAN VOWEL SEPARATOR <= 0x17B5
u == 0x180F or # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR or 0x180B # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
0x200B <= u <= 0x200F or # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK <= u
0x202A <= u <= 0x202E or # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE <= 0x180D
0x2060 <= u <= 0x2064 or # Cf [5] WORD JOINER..INVISIBLE PLUS or u # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
u == 0x2065 or # Cn <reserved-2065> == 0x180E
0x2066 <= u <= 0x206F or # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES or u == 0x180F # Cf MONGOLIAN VOWEL SEPARATOR
u == 0x3164 or # Lo HANGUL FILLER or 0x200B <= u <= 0x200F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
0xFE00 <= u <= 0xFE0F or # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 or 0x202A <= u <= 0x202E # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
u == 0xFEFF or # Cf ZERO WIDTH NO-BREAK SPACE or 0x2060 # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
u == 0xFFA0 or # Lo HALFWIDTH HANGUL FILLER <= u
0xFFF0 <= u <= 0xFFF8 or # Cn [9] <reserved-FFF0>..<reserved-FFF8> <= 0x2064
0x1BCA0 <= u <= 0x1BCA3 or # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP or u == 0x2065 # Cf [5] WORD JOINER..INVISIBLE PLUS
0x1D173 <= u <= 0x1D17A or # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE or 0x2066 <= u <= 0x206F # Cn <reserved-2065>
u == 0xE0000 or # Cn <reserved-E0000> or u == 0x3164 # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
u == 0xE0001 or # Cf LANGUAGE TAG or 0xFE00 <= u <= 0xFE0F # Lo HANGUL FILLER
0xE0002 <= u <= 0xE001F or # Cn [30] <reserved-E0002>..<reserved-E001F> or u == 0xFEFF # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
0xE0020 <= u <= 0xE007F or # Cf [96] TAG SPACE..CANCEL TAG or u == 0xFFA0 # Cf ZERO WIDTH NO-BREAK SPACE
0xE0080 <= u <= 0xE00FF or # Cn [128] <reserved-E0080>..<reserved-E00FF> or 0xFFF0 <= u <= 0xFFF8 # Lo HALFWIDTH HANGUL FILLER
0xE0100 <= u <= 0xE01EF or # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 or 0x1BCA0 <= u <= 0x1BCA3 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
0xE01F0 <= u <= 0xE0FFF or # Cn [3600] <reserved-E01F0>..<reserved-E0FFF> or 0x1D173 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
False) <= u
<= 0x1D17A
or u == 0xE0000 # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
or u == 0xE0001 # Cn <reserved-E0000>
or 0xE0002 <= u <= 0xE001F # Cf LANGUAGE TAG
or 0xE0020 <= u <= 0xE007F # Cn [30] <reserved-E0002>..<reserved-E001F>
or 0xE0080 <= u <= 0xE00FF # Cf [96] TAG SPACE..CANCEL TAG
or 0xE0100 <= u <= 0xE01EF # Cn [128] <reserved-E0080>..<reserved-E00FF>
or 0xE01F0 # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
<= u
<= 0xE0FFF
or False # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
)
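
A few sanity checks of the predicate; the fontTools.merge.unicode import path is an assumption based on this refactor:

    from fontTools.merge.unicode import is_Default_Ignorable  # import path assumed

    assert is_Default_Ignorable(0x00AD)      # SOFT HYPHEN
    assert is_Default_Ignorable(0x200D)      # ZERO WIDTH JOINER (200B..200F block)
    assert is_Default_Ignorable(0xFE0F)      # VARIATION SELECTOR-16
    assert not is_Default_Ignorable(0x0041)  # LATIN CAPITAL LETTER A
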

View File

@ -14,118 +14,130 @@ log = logging.getLogger("fontTools.merge")
# General utility functions for merging values from different fonts # General utility functions for merging values from different fonts
def equal(lst): def equal(lst):
lst = list(lst) lst = list(lst)
t = iter(lst) t = iter(lst)
first = next(t) first = next(t)
assert all(item == first for item in t), "Expected all items to be equal: %s" % lst assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
return first return first
def first(lst): def first(lst):
return next(iter(lst)) return next(iter(lst))
def recalculate(lst): def recalculate(lst):
return NotImplemented return NotImplemented
def current_time(lst): def current_time(lst):
return timestampNow() return timestampNow()
def bitwise_and(lst): def bitwise_and(lst):
return reduce(operator.and_, lst) return reduce(operator.and_, lst)
def bitwise_or(lst): def bitwise_or(lst):
return reduce(operator.or_, lst) return reduce(operator.or_, lst)
def avg_int(lst): def avg_int(lst):
lst = list(lst) lst = list(lst)
return sum(lst) // len(lst) return sum(lst) // len(lst)
def onlyExisting(func): def onlyExisting(func):
"""Returns a filter func that when called with a list, """Returns a filter func that when called with a list,
only calls func on the non-NotImplemented items of the list, only calls func on the non-NotImplemented items of the list,
and only does so if there's at least one item remaining. and only does so if there's at least one item remaining.
Otherwise returns NotImplemented.""" Otherwise returns NotImplemented."""
def wrapper(lst): def wrapper(lst):
items = [item for item in lst if item is not NotImplemented] items = [item for item in lst if item is not NotImplemented]
return func(items) if items else NotImplemented return func(items) if items else NotImplemented
return wrapper
return wrapper
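
A minimal illustration of the wrapper, assuming onlyExisting is importable from fontTools.merge.util (path assumed):

    from fontTools.merge.util import onlyExisting  # import path assumed

    merge_max = onlyExisting(max)
    assert merge_max([NotImplemented, 3, 5]) == 5                         # missing values are skipped
    assert merge_max([NotImplemented, NotImplemented]) is NotImplemented  # nothing left to merge
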
def sumLists(lst): def sumLists(lst):
l = [] l = []
for item in lst: for item in lst:
l.extend(item) l.extend(item)
return l return l
def sumDicts(lst): def sumDicts(lst):
d = {} d = {}
for item in lst: for item in lst:
d.update(item) d.update(item)
return d return d
def mergeBits(bitmap): def mergeBits(bitmap):
def wrapper(lst):
lst = list(lst)
returnValue = 0
for bitNumber in range(bitmap["size"]):
try:
mergeLogic = bitmap[bitNumber]
except KeyError:
try:
mergeLogic = bitmap["*"]
except KeyError:
raise Exception("Don't know how to merge bit %s" % bitNumber)
shiftedBit = 1 << bitNumber
mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
returnValue |= mergedValue << bitNumber
return returnValue
def wrapper(lst): return wrapper
lst = list(lst)
returnValue = 0
for bitNumber in range(bitmap['size']):
try:
mergeLogic = bitmap[bitNumber]
except KeyError:
try:
mergeLogic = bitmap['*']
except KeyError:
raise Exception("Don't know how to merge bit %s" % bitNumber)
shiftedBit = 1 << bitNumber
mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
returnValue |= mergedValue << bitNumber
return returnValue
return wrapper
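
A toy bit map in the same shape as headFlagsMergeBitMap / os2FsTypeMergeBitMap above; mergeBits, bitwise_and and bitwise_or are assumed importable from fontTools.merge.util:

    from fontTools.merge.util import mergeBits, bitwise_and, bitwise_or  # path assumed

    bitmap = {"size": 4, "*": bitwise_or, 0: bitwise_and}  # bit 0 must be set in every font
    assert mergeBits(bitmap)([0b0011, 0b0101]) == 0b0111
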
class AttendanceRecordingIdentityDict(object): class AttendanceRecordingIdentityDict(object):
"""A dictionary-like object that records indices of items actually accessed """A dictionary-like object that records indices of items actually accessed
from a list.""" from a list."""
def __init__(self, lst): def __init__(self, lst):
self.l = lst self.l = lst
self.d = {id(v):i for i,v in enumerate(lst)} self.d = {id(v): i for i, v in enumerate(lst)}
self.s = set() self.s = set()
def __getitem__(self, v):
self.s.add(self.d[id(v)])
return v
def __getitem__(self, v):
self.s.add(self.d[id(v)])
return v
class GregariousIdentityDict(object): class GregariousIdentityDict(object):
"""A dictionary-like object that welcomes guests without reservations and """A dictionary-like object that welcomes guests without reservations and
adds them to the end of the guest list.""" adds them to the end of the guest list."""
def __init__(self, lst): def __init__(self, lst):
self.l = lst self.l = lst
self.s = set(id(v) for v in lst) self.s = set(id(v) for v in lst)
def __getitem__(self, v):
if id(v) not in self.s:
self.s.add(id(v))
self.l.append(v)
return v
def __getitem__(self, v):
if id(v) not in self.s:
self.s.add(id(v))
self.l.append(v)
return v
class NonhashableDict(object): class NonhashableDict(object):
"""A dictionary-like object mapping objects to values.""" """A dictionary-like object mapping objects to values."""
def __init__(self, keys, values=None): def __init__(self, keys, values=None):
if values is None: if values is None:
self.d = {id(v):i for i,v in enumerate(keys)} self.d = {id(v): i for i, v in enumerate(keys)}
else: else:
self.d = {id(k):v for k,v in zip(keys, values)} self.d = {id(k): v for k, v in zip(keys, values)}
def __getitem__(self, k): def __getitem__(self, k):
return self.d[id(k)] return self.d[id(k)]
def __setitem__(self, k, v): def __setitem__(self, k, v):
self.d[id(k)] = v self.d[id(k)] = v
def __delitem__(self, k): def __delitem__(self, k):
del self.d[id(k)] del self.d[id(k)]
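
A sketch of how these identity-keyed mappings are used to renumber unhashable objects such as Lookup tables (compare the LookupList pruning earlier in this diff); the fontTools.merge.util path is assumed:

    from fontTools.merge.util import NonhashableDict  # import path assumed

    lookups = [object(), object(), object()]  # stand-ins for (unhashable) Lookup objects
    index_of = NonhashableDict(lookups)       # keyed by id(), not by hash/equality
    assert index_of[lookups[2]] == 2
    index_of[lookups[0]] = 7                  # indices can be reassigned
    assert index_of[lookups[0]] == 7
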

View File

@ -23,6 +23,7 @@ def calcBounds(array):
ys = [y for x, y in array] ys = [y for x, y in array]
return min(xs), min(ys), max(xs), max(ys) return min(xs), min(ys), max(xs), max(ys)
def calcIntBounds(array, round=otRound): def calcIntBounds(array, round=otRound):
"""Calculate the integer bounding rectangle of a 2D points array. """Calculate the integer bounding rectangle of a 2D points array.
@ -57,6 +58,7 @@ def updateBounds(bounds, p, min=min, max=max):
xMin, yMin, xMax, yMax = bounds xMin, yMin, xMax, yMax = bounds
return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
def pointInRect(p, rect): def pointInRect(p, rect):
"""Test if a point is inside a bounding rectangle. """Test if a point is inside a bounding rectangle.
@ -72,6 +74,7 @@ def pointInRect(p, rect):
xMin, yMin, xMax, yMax = rect xMin, yMin, xMax, yMax = rect
return (xMin <= x <= xMax) and (yMin <= y <= yMax) return (xMin <= x <= xMax) and (yMin <= y <= yMax)
def pointsInRect(array, rect): def pointsInRect(array, rect):
"""Determine which points are inside a bounding rectangle. """Determine which points are inside a bounding rectangle.
@ -88,6 +91,7 @@ def pointsInRect(array, rect):
xMin, yMin, xMax, yMax = rect xMin, yMin, xMax, yMax = rect
return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
def vectorLength(vector): def vectorLength(vector):
"""Calculate the length of the given vector. """Calculate the length of the given vector.
@ -100,6 +104,7 @@ def vectorLength(vector):
x, y = vector x, y = vector
return math.sqrt(x**2 + y**2) return math.sqrt(x**2 + y**2)
def asInt16(array): def asInt16(array):
"""Round a list of floats to 16-bit signed integers. """Round a list of floats to 16-bit signed integers.
@ -109,7 +114,7 @@ def asInt16(array):
Returns: Returns:
A list of rounded integers. A list of rounded integers.
""" """
return [int(math.floor(i+0.5)) for i in array] return [int(math.floor(i + 0.5)) for i in array]
def normRect(rect): def normRect(rect):
@ -130,6 +135,7 @@ def normRect(rect):
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)
def scaleRect(rect, x, y): def scaleRect(rect, x, y):
"""Scale a bounding box rectangle. """Scale a bounding box rectangle.
@ -145,6 +151,7 @@ def scaleRect(rect, x, y):
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return xMin * x, yMin * y, xMax * x, yMax * y return xMin * x, yMin * y, xMax * x, yMax * y
def offsetRect(rect, dx, dy): def offsetRect(rect, dx, dy):
"""Offset a bounding box rectangle. """Offset a bounding box rectangle.
@ -158,7 +165,8 @@ def offsetRect(rect, dx, dy):
An offset bounding rectangle. An offset bounding rectangle.
""" """
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return xMin+dx, yMin+dy, xMax+dx, yMax+dy return xMin + dx, yMin + dy, xMax + dx, yMax + dy
def insetRect(rect, dx, dy): def insetRect(rect, dx, dy):
"""Inset a bounding box rectangle on all sides. """Inset a bounding box rectangle on all sides.
@ -173,7 +181,8 @@ def insetRect(rect, dx, dy):
An inset bounding rectangle. An inset bounding rectangle.
""" """
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return xMin+dx, yMin+dy, xMax-dx, yMax-dy return xMin + dx, yMin + dy, xMax - dx, yMax - dy
def sectRect(rect1, rect2): def sectRect(rect1, rect2):
"""Test for rectangle-rectangle intersection. """Test for rectangle-rectangle intersection.
@ -191,12 +200,17 @@ def sectRect(rect1, rect2):
""" """
(xMin1, yMin1, xMax1, yMax1) = rect1 (xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2 (xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), xMin, yMin, xMax, yMax = (
min(xMax1, xMax2), min(yMax1, yMax2)) max(xMin1, xMin2),
max(yMin1, yMin2),
min(xMax1, xMax2),
min(yMax1, yMax2),
)
if xMin >= xMax or yMin >= yMax: if xMin >= xMax or yMin >= yMax:
return False, (0, 0, 0, 0) return False, (0, 0, 0, 0)
return True, (xMin, yMin, xMax, yMax) return True, (xMin, yMin, xMax, yMax)
def unionRect(rect1, rect2): def unionRect(rect1, rect2):
"""Determine union of bounding rectangles. """Determine union of bounding rectangles.
@ -211,10 +225,15 @@ def unionRect(rect1, rect2):
""" """
(xMin1, yMin1, xMax1, yMax1) = rect1 (xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2 (xMin2, yMin2, xMax2, yMax2) = rect2
xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), xMin, yMin, xMax, yMax = (
max(xMax1, xMax2), max(yMax1, yMax2)) min(xMin1, xMin2),
min(yMin1, yMin2),
max(xMax1, xMax2),
max(yMax1, yMax2),
)
return (xMin, yMin, xMax, yMax) return (xMin, yMin, xMax, yMax)
def rectCenter(rect): def rectCenter(rect):
"""Determine rectangle center. """Determine rectangle center.
@ -226,7 +245,8 @@ def rectCenter(rect):
A 2D tuple representing the point at the center of the rectangle. A 2D tuple representing the point at the center of the rectangle.
""" """
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return (xMin+xMax)/2, (yMin+yMax)/2 return (xMin + xMax) / 2, (yMin + yMax) / 2
def rectArea(rect): def rectArea(rect):
"""Determine rectangle area. """Determine rectangle area.
@ -241,6 +261,7 @@ def rectArea(rect):
(xMin, yMin, xMax, yMax) = rect (xMin, yMin, xMax, yMax) = rect
return (yMax - yMin) * (xMax - xMin) return (yMax - yMin) * (xMax - xMin)
def intRect(rect): def intRect(rect):
"""Round a rectangle to integer values. """Round a rectangle to integer values.
@ -262,7 +283,6 @@ def intRect(rect):
class Vector(_Vector): class Vector(_Vector):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
warnings.warn( warnings.warn(
"fontTools.misc.arrayTools.Vector has been deprecated, please use " "fontTools.misc.arrayTools.Vector has been deprecated, please use "
@ -373,7 +393,9 @@ def _test():
(0, 2, 4, 5) (0, 2, 4, 5)
""" """
if __name__ == "__main__": if __name__ == "__main__":
import sys import sys
import doctest import doctest
sys.exit(doctest.testmod().failed) sys.exit(doctest.testmod().failed)

View File

@ -7,6 +7,17 @@ from fontTools.misc.transform import Identity
import math import math
from collections import namedtuple from collections import namedtuple
try:
import cython
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = False
Intersection = namedtuple("Intersection", ["pt", "t1", "t2"]) Intersection = namedtuple("Intersection", ["pt", "t1", "t2"])
@ -26,10 +37,13 @@ __all__ = [
"splitCubic", "splitCubic",
"splitQuadraticAtT", "splitQuadraticAtT",
"splitCubicAtT", "splitCubicAtT",
"splitCubicAtTC",
"splitCubicIntoTwoAtTC",
"solveQuadratic", "solveQuadratic",
"solveCubic", "solveCubic",
"quadraticPointAtT", "quadraticPointAtT",
"cubicPointAtT", "cubicPointAtT",
"cubicPointAtTC",
"linePointAtT", "linePointAtT",
"segmentPointAtT", "segmentPointAtT",
"lineLineIntersections", "lineLineIntersections",
@ -67,6 +81,14 @@ def _split_cubic_into_two(p0, p1, p2, p3):
) )
@cython.returns(cython.double)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mult=cython.double, arch=cython.double, box=cython.double)
def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3): def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3):
arch = abs(p0 - p3) arch = abs(p0 - p3)
box = abs(p0 - p1) + abs(p1 - p2) + abs(p2 - p3) box = abs(p0 - p1) + abs(p1 - p2) + abs(p2 - p3)
@ -79,6 +101,17 @@ def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3):
) )
@cython.returns(cython.double)
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
)
@cython.locals(
tolerance=cython.double,
mult=cython.double,
)
def calcCubicArcLengthC(pt1, pt2, pt3, pt4, tolerance=0.005): def calcCubicArcLengthC(pt1, pt2, pt3, pt4, tolerance=0.005):
"""Calculates the arc length for a cubic Bezier segment. """Calculates the arc length for a cubic Bezier segment.
@ -97,14 +130,22 @@ epsilonDigits = 6
epsilon = 1e-10 epsilon = 1e-10
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex)
def _dot(v1, v2): def _dot(v1, v2):
return (v1 * v2.conjugate()).real return (v1 * v2.conjugate()).real
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(x=cython.complex)
def _intSecAtan(x): def _intSecAtan(x):
# In : sympy.integrate(sp.sec(sp.atan(x))) # In : sympy.integrate(sp.sec(sp.atan(x)))
# Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2 # Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2
return x * math.sqrt(x ** 2 + 1) / 2 + math.asinh(x) / 2 return x * math.sqrt(x**2 + 1) / 2 + math.asinh(x) / 2
def calcQuadraticArcLength(pt1, pt2, pt3): def calcQuadraticArcLength(pt1, pt2, pt3):
@ -142,6 +183,25 @@ def calcQuadraticArcLength(pt1, pt2, pt3):
return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3))
@cython.returns(cython.double)
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
d0=cython.complex,
d1=cython.complex,
d=cython.complex,
n=cython.complex,
)
@cython.locals(
scale=cython.double,
origDist=cython.double,
a=cython.double,
b=cython.double,
x0=cython.double,
x1=cython.double,
Len=cython.double,
)
def calcQuadraticArcLengthC(pt1, pt2, pt3): def calcQuadraticArcLengthC(pt1, pt2, pt3):
"""Calculates the arc length for a quadratic Bezier segment. """Calculates the arc length for a quadratic Bezier segment.
@ -191,6 +251,17 @@ def approximateQuadraticArcLength(pt1, pt2, pt3):
return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3))
@cython.returns(cython.double)
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
)
@cython.locals(
v0=cython.double,
v1=cython.double,
v2=cython.double,
)
def approximateQuadraticArcLengthC(pt1, pt2, pt3): def approximateQuadraticArcLengthC(pt1, pt2, pt3):
"""Calculates the arc length for a quadratic Bezier segment. """Calculates the arc length for a quadratic Bezier segment.
@ -288,6 +359,20 @@ def approximateCubicArcLength(pt1, pt2, pt3, pt4):
) )
@cython.returns(cython.double)
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
)
@cython.locals(
v0=cython.double,
v1=cython.double,
v2=cython.double,
v3=cython.double,
v4=cython.double,
)
def approximateCubicArcLengthC(pt1, pt2, pt3, pt4): def approximateCubicArcLengthC(pt1, pt2, pt3, pt4):
"""Approximates the arc length for a cubic Bezier segment. """Approximates the arc length for a cubic Bezier segment.
@ -549,6 +634,70 @@ def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
return _splitCubicAtT(a, b, c, d, *ts) return _splitCubicAtT(a, b, c, d, *ts)
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
a=cython.complex,
b=cython.complex,
c=cython.complex,
d=cython.complex,
)
def splitCubicAtTC(pt1, pt2, pt3, pt4, *ts):
"""Split a cubic Bezier curve at one or more values of t.
Args:
pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers.
*ts: Positions at which to split the curve.
Yields:
Curve segments (each curve segment being four complex numbers).
"""
a, b, c, d = calcCubicParametersC(pt1, pt2, pt3, pt4)
yield from _splitCubicAtTC(a, b, c, d, *ts)
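
Example use of the complex-valued generator added here (splitCubicAtT, by contrast, works on coordinate tuples and returns a list):

    from fontTools.misc.bezierTools import splitCubicAtTC, cubicPointAtTC

    p1, p2, p3, p4 = 0 + 0j, 25 + 100j, 75 + 100j, 100 + 0j
    first, second = splitCubicAtTC(p1, p2, p3, p4, 0.5)
    mid = cubicPointAtTC(p1, p2, p3, p4, 0.5)
    # the two halves meet at the on-curve point for t=0.5
    assert abs(first[3] - mid) < 1e-9 and abs(second[0] - mid) < 1e-9
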
@cython.returns(cython.complex)
@cython.locals(
t=cython.double,
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
pointAtT=cython.complex,
off1=cython.complex,
off2=cython.complex,
)
@cython.locals(
t2=cython.double, _1_t=cython.double, _1_t_2=cython.double, _2_t_1_t=cython.double
)
def splitCubicIntoTwoAtTC(pt1, pt2, pt3, pt4, t):
"""Split a cubic Bezier curve at t.
Args:
pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers.
t: Position at which to split the curve.
Returns:
A tuple of two curve segments (each curve segment being four complex numbers).
"""
t2 = t * t
_1_t = 1 - t
_1_t_2 = _1_t * _1_t
_2_t_1_t = 2 * t * _1_t
pointAtT = (
_1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4
)
off1 = _1_t_2 * pt1 + _2_t_1_t * pt2 + t2 * pt3
off2 = _1_t_2 * pt2 + _2_t_1_t * pt3 + t2 * pt4
pt2 = pt1 + (pt2 - pt1) * t
pt3 = pt4 + (pt3 - pt4) * _1_t
return ((pt1, pt2, off1, pointAtT), (pointAtT, off2, pt3, pt4))
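
splitCubicIntoTwoAtTC is the specialised two-way split; within floating-point noise it agrees with splitCubicAtTC called with the same t:

    from fontTools.misc.bezierTools import splitCubicAtTC, splitCubicIntoTwoAtTC

    p1, p2, p3, p4 = 0 + 0j, 0 + 50j, 100 + 50j, 100 + 0j
    a, b = splitCubicIntoTwoAtTC(p1, p2, p3, p4, 0.25)
    c, d = splitCubicAtTC(p1, p2, p3, p4, 0.25)
    assert all(abs(x - y) < 1e-9 for x, y in zip(a + b, c + d))
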
def _splitQuadraticAtT(a, b, c, *ts): def _splitQuadraticAtT(a, b, c, *ts):
ts = list(ts) ts = list(ts)
segments = [] segments = []
@ -611,6 +760,44 @@ def _splitCubicAtT(a, b, c, d, *ts):
return segments return segments
@cython.locals(
a=cython.complex,
b=cython.complex,
c=cython.complex,
d=cython.complex,
t1=cython.double,
t2=cython.double,
delta=cython.double,
delta_2=cython.double,
delta_3=cython.double,
a1=cython.complex,
b1=cython.complex,
c1=cython.complex,
d1=cython.complex,
)
def _splitCubicAtTC(a, b, c, d, *ts):
ts = list(ts)
ts.insert(0, 0.0)
ts.append(1.0)
for i in range(len(ts) - 1):
t1 = ts[i]
t2 = ts[i + 1]
delta = t2 - t1
delta_2 = delta * delta
delta_3 = delta * delta_2
t1_2 = t1 * t1
t1_3 = t1 * t1_2
# calc new a, b, c and d
a1 = a * delta_3
b1 = (3 * a * t1 + b) * delta_2
c1 = (2 * b * t1 + c + 3 * a * t1_2) * delta
d1 = a * t1_3 + b * t1_2 + c * t1 + d
pt1, pt2, pt3, pt4 = calcCubicPointsC(a1, b1, c1, d1)
yield (pt1, pt2, pt3, pt4)
# #
# Equation solvers. # Equation solvers.
# #
@ -773,6 +960,23 @@ def calcCubicParameters(pt1, pt2, pt3, pt4):
return (ax, ay), (bx, by), (cx, cy), (dx, dy) return (ax, ay), (bx, by), (cx, cy), (dx, dy)
@cython.cfunc
@cython.locals(
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
a=cython.complex,
b=cython.complex,
c=cython.complex,
)
def calcCubicParametersC(pt1, pt2, pt3, pt4):
c = (pt2 - pt1) * 3.0
b = (pt3 - pt2) * 3.0 - c
a = pt4 - pt1 - c - b
return (a, b, c, pt1)
def calcQuadraticPoints(a, b, c): def calcQuadraticPoints(a, b, c):
ax, ay = a ax, ay = a
bx, by = b bx, by = b
@ -802,6 +1006,24 @@ def calcCubicPoints(a, b, c, d):
return (x1, y1), (x2, y2), (x3, y3), (x4, y4) return (x1, y1), (x2, y2), (x3, y3), (x4, y4)
@cython.cfunc
@cython.locals(
a=cython.complex,
b=cython.complex,
c=cython.complex,
d=cython.complex,
p2=cython.complex,
p3=cython.complex,
p4=cython.complex,
_1_3=cython.double,
)
def calcCubicPointsC(a, b, c, d, _1_3=1.0 / 3):
p2 = (c * _1_3) + d
p3 = (b + c) * _1_3 + p2
p4 = a + b + c + d
return (d, p2, p3, p4)
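
calcCubicParametersC and calcCubicPointsC are the complex-number counterparts of calcCubicParameters / calcCubicPoints: control points to cubic polynomial coefficients and back. A sketch of the round trip using the public tuple-based pair:

    from fontTools.misc.bezierTools import calcCubicParameters, calcCubicPoints

    pts = ((0, 0), (10, 30), (60, 30), (100, 0))
    a, b, c, d = calcCubicParameters(*pts)
    back = calcCubicPoints(a, b, c, d)
    assert all(abs(px - qx) < 1e-9 and abs(py - qy) < 1e-9
               for (px, py), (qx, qy) in zip(pts, back))
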
# #
# Point at time # Point at time
# #
@ -845,21 +1067,47 @@ def cubicPointAtT(pt1, pt2, pt3, pt4, t):
Returns: Returns:
A 2D tuple with the coordinates of the point. A 2D tuple with the coordinates of the point.
""" """
t2 = t * t
_1_t = 1 - t
_1_t_2 = _1_t * _1_t
x = ( x = (
(1 - t) * (1 - t) * (1 - t) * pt1[0] _1_t_2 * _1_t * pt1[0]
+ 3 * (1 - t) * (1 - t) * t * pt2[0] + 3 * (_1_t_2 * t * pt2[0] + _1_t * t2 * pt3[0])
+ 3 * (1 - t) * t * t * pt3[0] + t2 * t * pt4[0]
+ t * t * t * pt4[0]
) )
y = ( y = (
(1 - t) * (1 - t) * (1 - t) * pt1[1] _1_t_2 * _1_t * pt1[1]
+ 3 * (1 - t) * (1 - t) * t * pt2[1] + 3 * (_1_t_2 * t * pt2[1] + _1_t * t2 * pt3[1])
+ 3 * (1 - t) * t * t * pt3[1] + t2 * t * pt4[1]
+ t * t * t * pt4[1]
) )
return (x, y) return (x, y)
@cython.returns(cython.complex)
@cython.locals(
t=cython.double,
pt1=cython.complex,
pt2=cython.complex,
pt3=cython.complex,
pt4=cython.complex,
)
@cython.locals(t2=cython.double, _1_t=cython.double, _1_t_2=cython.double)
def cubicPointAtTC(pt1, pt2, pt3, pt4, t):
"""Finds the point at time `t` on a cubic curve.
Args:
pt1, pt2, pt3, pt4: Coordinates of the curve as complex numbers.
t: The time along the curve.
Returns:
A complex number with the coordinates of the point.
"""
t2 = t * t
_1_t = 1 - t
_1_t_2 = _1_t * _1_t
return _1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4
def segmentPointAtT(seg, t): def segmentPointAtT(seg, t):
if len(seg) == 2: if len(seg) == 2:
return linePointAtT(*seg, t) return linePointAtT(*seg, t)

View File

@ -4,168 +4,169 @@
class Classifier(object): class Classifier(object):
""" """
Main Classifier object, used to classify things into similar sets. Main Classifier object, used to classify things into similar sets.
""" """
def __init__(self, sort=True): def __init__(self, sort=True):
self._things = set() # set of all things known so far self._things = set() # set of all things known so far
self._sets = [] # list of class sets produced so far self._sets = [] # list of class sets produced so far
self._mapping = {} # map from things to their class set self._mapping = {} # map from things to their class set
self._dirty = False self._dirty = False
self._sort = sort self._sort = sort
def add(self, set_of_things): def add(self, set_of_things):
""" """
Add a set to the classifier. Any iterable is accepted. Add a set to the classifier. Any iterable is accepted.
""" """
if not set_of_things: if not set_of_things:
return return
self._dirty = True self._dirty = True
things, sets, mapping = self._things, self._sets, self._mapping things, sets, mapping = self._things, self._sets, self._mapping
s = set(set_of_things) s = set(set_of_things)
intersection = s.intersection(things) # existing things intersection = s.intersection(things) # existing things
s.difference_update(intersection) # new things s.difference_update(intersection) # new things
difference = s difference = s
del s del s
# Add new class for new things # Add new class for new things
if difference: if difference:
things.update(difference) things.update(difference)
sets.append(difference) sets.append(difference)
for thing in difference: for thing in difference:
mapping[thing] = difference mapping[thing] = difference
del difference del difference
while intersection: while intersection:
# Take one item and process the old class it belongs to # Take one item and process the old class it belongs to
old_class = mapping[next(iter(intersection))] old_class = mapping[next(iter(intersection))]
old_class_intersection = old_class.intersection(intersection) old_class_intersection = old_class.intersection(intersection)
# Update old class to remove items from new set # Update old class to remove items from new set
old_class.difference_update(old_class_intersection) old_class.difference_update(old_class_intersection)
# Remove processed items from todo list # Remove processed items from todo list
intersection.difference_update(old_class_intersection) intersection.difference_update(old_class_intersection)
# Add new class for the intersection with old class # Add new class for the intersection with old class
sets.append(old_class_intersection) sets.append(old_class_intersection)
for thing in old_class_intersection: for thing in old_class_intersection:
mapping[thing] = old_class_intersection mapping[thing] = old_class_intersection
del old_class_intersection del old_class_intersection
def update(self, list_of_sets): def update(self, list_of_sets):
""" """
Add a list of sets to the classifier. Any iterable of iterables is accepted. Add a list of sets to the classifier. Any iterable of iterables is accepted.
""" """
for s in list_of_sets: for s in list_of_sets:
self.add(s) self.add(s)
def _process(self): def _process(self):
if not self._dirty: if not self._dirty:
return return
# Do any deferred processing # Do any deferred processing
sets = self._sets sets = self._sets
self._sets = [s for s in sets if s] self._sets = [s for s in sets if s]
if self._sort: if self._sort:
self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
self._dirty = False self._dirty = False
# Output methods # Output methods
def getThings(self): def getThings(self):
"""Returns the set of all things known so far. """Returns the set of all things known so far.
The return value belongs to the Classifier object and should NOT The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use. be modified while the classifier is still in use.
""" """
self._process() self._process()
return self._things return self._things
def getMapping(self): def getMapping(self):
"""Returns the mapping from things to their class set. """Returns the mapping from things to their class set.
The return value belongs to the Classifier object and should NOT The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use. be modified while the classifier is still in use.
""" """
self._process() self._process()
return self._mapping return self._mapping
def getClasses(self): def getClasses(self):
"""Returns the list of class sets. """Returns the list of class sets.
The return value belongs to the Classifier object and should NOT The return value belongs to the Classifier object and should NOT
be modified while the classifier is still in use. be modified while the classifier is still in use.
""" """
self._process() self._process()
return self._sets return self._sets
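
Incremental use of the Classifier itself (the classify() helper below is a thin wrapper around it); the fontTools.misc.classifyTools module path is assumed:

    from fontTools.misc.classifyTools import Classifier  # import path assumed

    c = Classifier()
    c.add({1, 2, 3})
    c.add({2, 3, 4})
    assert c.getClasses() == [{2, 3}, {1}, {4}]  # sorted: largest class first
    assert c.getMapping()[2] == {2, 3}
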
def classify(list_of_sets, sort=True): def classify(list_of_sets, sort=True):
""" """
Takes an iterable of iterables (list of sets from here on; but any Takes an iterable of iterables (list of sets from here on; but any
iterable works.), and returns the smallest list of sets such that iterable works.), and returns the smallest list of sets such that
each set is either a subset of, or is disjoint from, each of the input each set is either a subset of, or is disjoint from, each of the input
sets. sets.
In other words, this function classifies all the things present in In other words, this function classifies all the things present in
any of the input sets, into similar classes, based on which sets any of the input sets, into similar classes, based on which sets
things are a member of. things are a member of.
If sort=True, returned class sets are sorted by decreasing size and If sort=True, returned class sets are sorted by decreasing size and
their natural sort order within each class size. Otherwise, class their natural sort order within each class size. Otherwise, class
sets are returned in the order that they were identified, which is sets are returned in the order that they were identified, which is
generally not significant. generally not significant.
>>> classify([]) == ([], {}) >>> classify([]) == ([], {})
True True
>>> classify([[]]) == ([], {}) >>> classify([[]]) == ([], {})
True True
>>> classify([[], []]) == ([], {}) >>> classify([[], []]) == ([], {})
True True
>>> classify([[1]]) == ([{1}], {1: {1}}) >>> classify([[1]]) == ([{1}], {1: {1}})
True True
>>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
True True
>>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True True
>>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
True True
>>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
True True
>>> classify([[1,2],[2,4,5]]) == ( >>> classify([[1,2],[2,4,5]]) == (
... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True True
>>> classify([[1,2],[2,4,5]], sort=False) == ( >>> classify([[1,2],[2,4,5]], sort=False) == (
... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
True True
>>> classify([[1,2,9],[2,4,5]], sort=False) == ( >>> classify([[1,2,9],[2,4,5]], sort=False) == (
... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
... 9: {1, 9}}) ... 9: {1, 9}})
True True
>>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( >>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
True True
>>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
>>> set([frozenset(c) for c in classes]) == set( >>> set([frozenset(c) for c in classes]) == set(
... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
True True
>>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
True True
""" """
classifier = Classifier(sort=sort) classifier = Classifier(sort=sort)
classifier.update(list_of_sets) classifier.update(list_of_sets)
return classifier.getClasses(), classifier.getMapping() return classifier.getClasses(), classifier.getMapping()
if __name__ == "__main__": if __name__ == "__main__":
import sys, doctest import sys, doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

View File

@ -6,7 +6,9 @@ import re
numberAddedRE = re.compile(r"#\d+$") numberAddedRE = re.compile(r"#\d+$")
def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, suffix=""): def makeOutputFileName(
input, outputDir=None, extension=None, overWrite=False, suffix=""
):
"""Generates a suitable file name for writing output. """Generates a suitable file name for writing output.
Often tools will want to take a file, do some kind of transformation to it, Often tools will want to take a file, do some kind of transformation to it,
@ -44,6 +46,7 @@ def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False, s
if not overWrite: if not overWrite:
while os.path.exists(output): while os.path.exists(output):
output = os.path.join( output = os.path.join(
dirName, fileName + suffix + "#" + repr(n) + extension) dirName, fileName + suffix + "#" + repr(n) + extension
)
n += 1 n += 1
return output return output
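
A sketch of typical use, assuming the fontTools.misc.cliTools module path:

    from fontTools.misc.cliTools import makeOutputFileName  # import path assumed

    out = makeOutputFileName("MyFont.ttf", extension=".woff2", suffix="-web")
    # -> "MyFont-web.woff2" next to the input, or "MyFont-web#1.woff2" (then #2, ...)
    #    if that name is already taken, since overWrite defaults to False.
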

View File

@ -10,9 +10,11 @@ We only define the symbols that we use. E.g. see fontTools.cu2qu
from types import SimpleNamespace from types import SimpleNamespace
def _empty_decorator(x): def _empty_decorator(x):
return x return x
compiled = False compiled = False
for name in ("double", "complex", "int"): for name in ("double", "complex", "int"):

View File

@ -1,7 +1,7 @@
"""Misc dict tools.""" """Misc dict tools."""
__all__ = ['hashdict'] __all__ = ["hashdict"]
# https://stackoverflow.com/questions/1151658/python-hashable-dicts # https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict): class hashdict(dict):
@ -26,36 +26,54 @@ class hashdict(dict):
http://stackoverflow.com/questions/1151658/python-hashable-dicts http://stackoverflow.com/questions/1151658/python-hashable-dicts
""" """
def __key(self): def __key(self):
return tuple(sorted(self.items())) return tuple(sorted(self.items()))
def __repr__(self): def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, return "{0}({1})".format(
", ".join("{0}={1}".format( self.__class__.__name__,
str(i[0]),repr(i[1])) for i in self.__key())) ", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),
)
def __hash__(self): def __hash__(self):
return hash(self.__key()) return hash(self.__key())
def __setitem__(self, key, value): def __setitem__(self, key, value):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def __delitem__(self, key): def __delitem__(self, key):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def clear(self): def clear(self):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def pop(self, *args, **kwargs): def pop(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def popitem(self, *args, **kwargs): def popitem(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def setdefault(self, *args, **kwargs): def setdefault(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
def update(self, *args, **kwargs): def update(self, *args, **kwargs):
raise TypeError("{0} does not support item assignment" raise TypeError(
.format(self.__class__.__name__)) "{0} does not support item assignment".format(self.__class__.__name__)
)
# update is not ok because it mutates the object # update is not ok because it mutates the object
# __add__ is ok because it creates a new object # __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it # while the new object is under construction, it's ok to mutate it
@ -63,4 +81,3 @@ class hashdict(dict):
result = hashdict(self) result = hashdict(self)
dict.update(result, right) dict.update(result, right)
return result return result
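The reformatting above does not change what makes hashdict useful: equality and hashing are driven by the sorted item tuple, and every mutating method raises. A minimal independent sketch of the same idea (not the fontTools class itself):

class FrozenDict(dict):
    """Hash by sorted items; refuse in-place mutation."""

    def _key(self):
        return tuple(sorted(self.items()))

    def __hash__(self):
        return hash(self._key())

    def __setitem__(self, key, value):
        raise TypeError(f"{type(self).__name__} does not support item assignment")


d = FrozenDict(a=1, b=2)
cache = {d: "result"}                            # usable as a dict key
assert cache[FrozenDict(b=2, a=1)] == "result"   # order-insensitive lookup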
View File
@ -16,98 +16,104 @@ from fontTools.misc.textTools import bytechr, bytesjoin, byteord
def _decryptChar(cipher, R): def _decryptChar(cipher, R):
cipher = byteord(cipher) cipher = byteord(cipher)
plain = ( (cipher ^ (R>>8)) ) & 0xFF plain = ((cipher ^ (R >> 8))) & 0xFF
R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF R = ((cipher + R) * 52845 + 22719) & 0xFFFF
return bytechr(plain), R return bytechr(plain), R
def _encryptChar(plain, R): def _encryptChar(plain, R):
plain = byteord(plain) plain = byteord(plain)
cipher = ( (plain ^ (R>>8)) ) & 0xFF cipher = ((plain ^ (R >> 8))) & 0xFF
R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF R = ((cipher + R) * 52845 + 22719) & 0xFFFF
return bytechr(cipher), R return bytechr(cipher), R
def decrypt(cipherstring, R): def decrypt(cipherstring, R):
r""" r"""
Decrypts a string using the Type 1 encryption algorithm. Decrypts a string using the Type 1 encryption algorithm.
Args: Args:
cipherstring: String of ciphertext. cipherstring: String of ciphertext.
R: Initial key. R: Initial key.
Returns: Returns:
decryptedStr: Plaintext string. decryptedStr: Plaintext string.
R: Output key for subsequent decryptions. R: Output key for subsequent decryptions.
Examples:: Examples::
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
"""
plainList = []
for cipher in cipherstring:
plain, R = _decryptChar(cipher, R)
plainList.append(plain)
plainstring = bytesjoin(plainList)
return plainstring, int(R)
>>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True
>>> R == 36142
True
"""
plainList = []
for cipher in cipherstring:
plain, R = _decryptChar(cipher, R)
plainList.append(plain)
plainstring = bytesjoin(plainList)
return plainstring, int(R)
def encrypt(plainstring, R): def encrypt(plainstring, R):
r""" r"""
Encrypts a string using the Type 1 encryption algorithm. Encrypts a string using the Type 1 encryption algorithm.
Note that the algorithm as described in the Type 1 specification requires the Note that the algorithm as described in the Type 1 specification requires the
plaintext to be prefixed with a number of random bytes. (For ``eexec`` the plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
number of random bytes is set to 4.) This routine does *not* add the random number of random bytes is set to 4.) This routine does *not* add the random
prefix to its input. prefix to its input.
Args: Args:
plainstring: String of plaintext. plainstring: String of plaintext.
R: Initial key. R: Initial key.
Returns: Returns:
cipherstring: Ciphertext string. cipherstring: Ciphertext string.
R: Output key for subsequent encryptions. R: Output key for subsequent encryptions.
Examples:: Examples::
>>> testStr = b"\0\0asdadads asds\265" >>> testStr = b"\0\0asdadads asds\265"
>>> decryptedStr, R = decrypt(testStr, 12321) >>> decryptedStr, R = decrypt(testStr, 12321)
>>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
True True
>>> R == 36142 >>> R == 36142
True True
>>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
>>> encryptedStr, R = encrypt(testStr, 12321) >>> encryptedStr, R = encrypt(testStr, 12321)
>>> encryptedStr == b"\0\0asdadads asds\265" >>> encryptedStr == b"\0\0asdadads asds\265"
True True
>>> R == 36142 >>> R == 36142
True True
""" """
cipherList = [] cipherList = []
for plain in plainstring: for plain in plainstring:
cipher, R = _encryptChar(plain, R) cipher, R = _encryptChar(plain, R)
cipherList.append(cipher) cipherList.append(cipher)
cipherstring = bytesjoin(cipherList) cipherstring = bytesjoin(cipherList)
return cipherstring, int(R) return cipherstring, int(R)
def hexString(s): def hexString(s):
import binascii import binascii
return binascii.hexlify(s)
return binascii.hexlify(s)
def deHexString(h): def deHexString(h):
import binascii import binascii
h = bytesjoin(h.split())
return binascii.unhexlify(h) h = bytesjoin(h.split())
return binascii.unhexlify(h)
if __name__ == "__main__": if __name__ == "__main__":
import sys import sys
import doctest import doctest
sys.exit(doctest.testmod().failed)
sys.exit(doctest.testmod().failed)
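For readers skimming the reformatted bodies above: the Type 1 eexec recurrence XORs each byte with the high byte of a 16-bit key R and then advances R from the ciphertext byte, so encryption and decryption share the same key schedule and round-trip cleanly. A standalone sketch of that recurrence (55665 is the conventional eexec key):

def _ecrypt(plain, R=55665):
    out = bytearray()
    for p in plain:
        c = (p ^ (R >> 8)) & 0xFF
        R = ((c + R) * 52845 + 22719) & 0xFFFF   # key advances from the cipher byte
        out.append(c)
    return bytes(out)

def _dcrypt(cipher, R=55665):
    out = bytearray()
    for c in cipher:
        p = (c ^ (R >> 8)) & 0xFF
        R = ((c + R) * 52845 + 22719) & 0xFFFF   # same key schedule as encryption
        out.append(p)
    return bytes(out)

msg = b"\0\0\0\0/FontMatrix [0.001 0 0 0.001 0 0]"
assert _dcrypt(_ecrypt(msg)) == msg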
View File
@ -5,67 +5,68 @@ import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID # Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = { _encodingMap = {
0: { # Unicode 0: { # Unicode
0: 'utf_16_be', 0: "utf_16_be",
1: 'utf_16_be', 1: "utf_16_be",
2: 'utf_16_be', 2: "utf_16_be",
3: 'utf_16_be', 3: "utf_16_be",
4: 'utf_16_be', 4: "utf_16_be",
5: 'utf_16_be', 5: "utf_16_be",
6: 'utf_16_be', 6: "utf_16_be",
}, },
1: { # Macintosh 1: { # Macintosh
# See # See
# https://github.com/fonttools/fonttools/issues/236 # https://github.com/fonttools/fonttools/issues/236
0: { # Macintosh, platEncID==0, keyed by langID 0: { # Macintosh, platEncID==0, keyed by langID
15: "mac_iceland", 15: "mac_iceland",
17: "mac_turkish", 17: "mac_turkish",
18: "mac_croatian", 18: "mac_croatian",
24: "mac_latin2", 24: "mac_latin2",
25: "mac_latin2", 25: "mac_latin2",
26: "mac_latin2", 26: "mac_latin2",
27: "mac_latin2", 27: "mac_latin2",
28: "mac_latin2", 28: "mac_latin2",
36: "mac_latin2", 36: "mac_latin2",
37: "mac_romanian", 37: "mac_romanian",
38: "mac_latin2", 38: "mac_latin2",
39: "mac_latin2", 39: "mac_latin2",
40: "mac_latin2", 40: "mac_latin2",
Ellipsis: 'mac_roman', # Other Ellipsis: "mac_roman", # Other
}, },
1: 'x_mac_japanese_ttx', 1: "x_mac_japanese_ttx",
2: 'x_mac_trad_chinese_ttx', 2: "x_mac_trad_chinese_ttx",
3: 'x_mac_korean_ttx', 3: "x_mac_korean_ttx",
6: 'mac_greek', 6: "mac_greek",
7: 'mac_cyrillic', 7: "mac_cyrillic",
25: 'x_mac_simp_chinese_ttx', 25: "x_mac_simp_chinese_ttx",
29: 'mac_latin2', 29: "mac_latin2",
35: 'mac_turkish', 35: "mac_turkish",
37: 'mac_iceland', 37: "mac_iceland",
}, },
2: { # ISO 2: { # ISO
0: 'ascii', 0: "ascii",
1: 'utf_16_be', 1: "utf_16_be",
2: 'latin1', 2: "latin1",
}, },
3: { # Microsoft 3: { # Microsoft
0: 'utf_16_be', 0: "utf_16_be",
1: 'utf_16_be', 1: "utf_16_be",
2: 'shift_jis', 2: "shift_jis",
3: 'gb2312', 3: "gb2312",
4: 'big5', 4: "big5",
5: 'euc_kr', 5: "euc_kr",
6: 'johab', 6: "johab",
10: 'utf_16_be', 10: "utf_16_be",
}, },
} }
def getEncoding(platformID, platEncID, langID, default=None): def getEncoding(platformID, platEncID, langID, default=None):
"""Returns the Python encoding name for OpenType platformID/encodingID/langID """Returns the Python encoding name for OpenType platformID/encodingID/langID
triplet. If encoding for these values is not known, by default None is triplet. If encoding for these values is not known, by default None is
returned. That can be overridden by passing a value to the default argument. returned. That can be overridden by passing a value to the default argument.
""" """
encoding = _encodingMap.get(platformID, {}).get(platEncID, default) encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
if isinstance(encoding, dict): if isinstance(encoding, dict):
encoding = encoding.get(langID, encoding[Ellipsis]) encoding = encoding.get(langID, encoding[Ellipsis])
return encoding return encoding
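The table above is only requoted; the lookup logic is unchanged: platformID selects a sub-map, platEncID selects either an encoding name or (for Macintosh platEncID 0) a further langID-keyed map with an Ellipsis fallback. A tiny sketch of that two-level lookup, using a trimmed illustrative map:

_sketch_map = {
    1: {                                                  # Macintosh
        0: {17: "mac_turkish", Ellipsis: "mac_roman"},    # platEncID 0 is keyed by langID
        7: "mac_cyrillic",
    },
    3: {1: "utf_16_be"},                                  # Microsoft
}

def sketch_get_encoding(platformID, platEncID, langID, default=None):
    encoding = _sketch_map.get(platformID, {}).get(platEncID, default)
    if isinstance(encoding, dict):
        # langID-specific entry, with Ellipsis as the "any other language" fallback
        encoding = encoding.get(langID, encoding[Ellipsis])
    return encoding

assert sketch_get_encoding(1, 0, 17) == "mac_turkish"
assert sketch_get_encoding(1, 0, 0) == "mac_roman"
assert sketch_get_encoding(3, 1, 0x409) == "utf_16_be"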
View File
@ -244,7 +244,8 @@ except ImportError:
except UnicodeDecodeError: except UnicodeDecodeError:
raise ValueError( raise ValueError(
"Bytes strings can only contain ASCII characters. " "Bytes strings can only contain ASCII characters. "
"Use unicode strings for non-ASCII characters.") "Use unicode strings for non-ASCII characters."
)
except AttributeError: except AttributeError:
_raise_serialization_error(s) _raise_serialization_error(s)
if s and _invalid_xml_string.search(s): if s and _invalid_xml_string.search(s):
@ -425,9 +426,7 @@ except ImportError:
write(_escape_cdata(elem.tail)) write(_escape_cdata(elem.tail))
def _raise_serialization_error(text): def _raise_serialization_error(text):
raise TypeError( raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text): def _escape_cdata(text):
# escape character data # escape character data
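The error path being reflowed in this hunk enforces one rule: byte strings written into the XML must be ASCII, and anything else should be passed as str. A tiny illustration of that rule (a sketch, not the fontTools serializer itself):

def ascii_bytes_or_error(s):
    # Bytes are accepted only if they decode as ASCII; otherwise the caller
    # should have passed a str in the first place.
    if isinstance(s, bytes):
        try:
            return s.decode("ascii")
        except UnicodeDecodeError:
            raise ValueError(
                "Bytes strings can only contain ASCII characters. "
                "Use unicode strings for non-ASCII characters."
            )
    return s

assert ascii_bytes_or_error(b"glyph01") == "glyph01"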
View File
@ -27,216 +27,220 @@ maxFileNameLength = 255
class NameTranslationError(Exception): class NameTranslationError(Exception):
pass pass
def userNameToFileName(userName, existing=[], prefix="", suffix=""): def userNameToFileName(userName, existing=[], prefix="", suffix=""):
"""Converts from a user name to a file name. """Converts from a user name to a file name.
Takes care to avoid illegal characters, reserved file names, ambiguity between Takes care to avoid illegal characters, reserved file names, ambiguity between
upper- and lower-case characters, and clashes with existing files. upper- and lower-case characters, and clashes with existing files.
Args: Args:
userName (str): The input file name. userName (str): The input file name.
existing: A case-insensitive list of all existing file names. existing: A case-insensitive list of all existing file names.
prefix: Prefix to be prepended to the file name. prefix: Prefix to be prepended to the file name.
suffix: Suffix to be appended to the file name. suffix: Suffix to be appended to the file name.
Returns: Returns:
A suitable filename. A suitable filename.
Raises: Raises:
NameTranslationError: If no suitable name could be generated. NameTranslationError: If no suitable name could be generated.
Examples:: Examples::
>>> userNameToFileName("a") == "a"
True
>>> userNameToFileName("A") == "A_"
True
>>> userNameToFileName("AE") == "A_E_"
True
>>> userNameToFileName("Ae") == "A_e"
True
>>> userNameToFileName("ae") == "ae"
True
>>> userNameToFileName("aE") == "aE_"
True
>>> userNameToFileName("a.alt") == "a.alt"
True
>>> userNameToFileName("A.alt") == "A_.alt"
True
>>> userNameToFileName("A.Alt") == "A_.A_lt"
True
>>> userNameToFileName("A.aLt") == "A_.aL_t"
True
>>> userNameToFileName(u"A.alT") == "A_.alT_"
True
>>> userNameToFileName("T_H") == "T__H_"
True
>>> userNameToFileName("T_h") == "T__h"
True
>>> userNameToFileName("t_h") == "t_h"
True
>>> userNameToFileName("F_F_I") == "F__F__I_"
True
>>> userNameToFileName("f_f_i") == "f_f_i"
True
>>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
True
>>> userNameToFileName(".notdef") == "_notdef"
True
>>> userNameToFileName("con") == "_con"
True
>>> userNameToFileName("CON") == "C_O_N_"
True
>>> userNameToFileName("con.alt") == "_con.alt"
True
>>> userNameToFileName("alt.con") == "alt._con"
True
"""
# the incoming name must be a str
if not isinstance(userName, str):
raise ValueError("The value for userName must be a string.")
# establish the prefix and suffix lengths
prefixLength = len(prefix)
suffixLength = len(suffix)
# replace an initial period with an _
# if no prefix is to be added
if not prefix and userName[0] == ".":
userName = "_" + userName[1:]
# filter the user name
filteredUserName = []
for character in userName:
# replace illegal characters with _
if character in illegalCharacters:
character = "_"
# add _ to all non-lower characters
elif character != character.lower():
character += "_"
filteredUserName.append(character)
userName = "".join(filteredUserName)
# clip to 255
sliceLength = maxFileNameLength - prefixLength - suffixLength
userName = userName[:sliceLength]
# test for illegal files names
parts = []
for part in userName.split("."):
if part.lower() in reservedFileNames:
part = "_" + part
parts.append(part)
userName = ".".join(parts)
# test for clash
fullName = prefix + userName + suffix
if fullName.lower() in existing:
fullName = handleClash1(userName, existing, prefix, suffix)
# finished
return fullName
>>> userNameToFileName("a") == "a"
True
>>> userNameToFileName("A") == "A_"
True
>>> userNameToFileName("AE") == "A_E_"
True
>>> userNameToFileName("Ae") == "A_e"
True
>>> userNameToFileName("ae") == "ae"
True
>>> userNameToFileName("aE") == "aE_"
True
>>> userNameToFileName("a.alt") == "a.alt"
True
>>> userNameToFileName("A.alt") == "A_.alt"
True
>>> userNameToFileName("A.Alt") == "A_.A_lt"
True
>>> userNameToFileName("A.aLt") == "A_.aL_t"
True
>>> userNameToFileName(u"A.alT") == "A_.alT_"
True
>>> userNameToFileName("T_H") == "T__H_"
True
>>> userNameToFileName("T_h") == "T__h"
True
>>> userNameToFileName("t_h") == "t_h"
True
>>> userNameToFileName("F_F_I") == "F__F__I_"
True
>>> userNameToFileName("f_f_i") == "f_f_i"
True
>>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
True
>>> userNameToFileName(".notdef") == "_notdef"
True
>>> userNameToFileName("con") == "_con"
True
>>> userNameToFileName("CON") == "C_O_N_"
True
>>> userNameToFileName("con.alt") == "_con.alt"
True
>>> userNameToFileName("alt.con") == "alt._con"
True
"""
# the incoming name must be a str
if not isinstance(userName, str):
raise ValueError("The value for userName must be a string.")
# establish the prefix and suffix lengths
prefixLength = len(prefix)
suffixLength = len(suffix)
# replace an initial period with an _
# if no prefix is to be added
if not prefix and userName[0] == ".":
userName = "_" + userName[1:]
# filter the user name
filteredUserName = []
for character in userName:
# replace illegal characters with _
if character in illegalCharacters:
character = "_"
# add _ to all non-lower characters
elif character != character.lower():
character += "_"
filteredUserName.append(character)
userName = "".join(filteredUserName)
# clip to 255
sliceLength = maxFileNameLength - prefixLength - suffixLength
userName = userName[:sliceLength]
# test for illegal files names
parts = []
for part in userName.split("."):
if part.lower() in reservedFileNames:
part = "_" + part
parts.append(part)
userName = ".".join(parts)
# test for clash
fullName = prefix + userName + suffix
if fullName.lower() in existing:
fullName = handleClash1(userName, existing, prefix, suffix)
# finished
return fullName
def handleClash1(userName, existing=[], prefix="", suffix=""): def handleClash1(userName, existing=[], prefix="", suffix=""):
""" """
existing should be a case-insensitive list existing should be a case-insensitive list
of all existing file names. of all existing file names.
>>> prefix = ("0" * 5) + "." >>> prefix = ("0" * 5) + "."
>>> suffix = "." + ("0" * 10) >>> suffix = "." + ("0" * 10)
>>> existing = ["a" * 5] >>> existing = ["a" * 5]
>>> e = list(existing) >>> e = list(existing)
>>> handleClash1(userName="A" * 5, existing=e, >>> handleClash1(userName="A" * 5, existing=e,
... prefix=prefix, suffix=suffix) == ( ... prefix=prefix, suffix=suffix) == (
... '00000.AAAAA000000000000001.0000000000') ... '00000.AAAAA000000000000001.0000000000')
True True
>>> e = list(existing) >>> e = list(existing)
>>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix) >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
>>> handleClash1(userName="A" * 5, existing=e, >>> handleClash1(userName="A" * 5, existing=e,
... prefix=prefix, suffix=suffix) == ( ... prefix=prefix, suffix=suffix) == (
... '00000.AAAAA000000000000002.0000000000') ... '00000.AAAAA000000000000002.0000000000')
True True
>>> e = list(existing)
>>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
>>> handleClash1(userName="A" * 5, existing=e,
... prefix=prefix, suffix=suffix) == (
... '00000.AAAAA000000000000001.0000000000')
True
"""
# if the prefix length + user name length + suffix length + 15 is at
# or past the maximum length, slice 15 characters off of the user name
prefixLength = len(prefix)
suffixLength = len(suffix)
if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
l = prefixLength + len(userName) + suffixLength + 15
sliceLength = maxFileNameLength - l
userName = userName[:sliceLength]
finalName = None
# try to add numbers to create a unique name
counter = 1
while finalName is None:
name = userName + str(counter).zfill(15)
fullName = prefix + name + suffix
if fullName.lower() not in existing:
finalName = fullName
break
else:
counter += 1
if counter >= 999999999999999:
break
# if there is a clash, go to the next fallback
if finalName is None:
finalName = handleClash2(existing, prefix, suffix)
# finished
return finalName
>>> e = list(existing)
>>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
>>> handleClash1(userName="A" * 5, existing=e,
... prefix=prefix, suffix=suffix) == (
... '00000.AAAAA000000000000001.0000000000')
True
"""
# if the prefix length + user name length + suffix length + 15 is at
# or past the maximum length, slice 15 characters off of the user name
prefixLength = len(prefix)
suffixLength = len(suffix)
if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
l = (prefixLength + len(userName) + suffixLength + 15)
sliceLength = maxFileNameLength - l
userName = userName[:sliceLength]
finalName = None
# try to add numbers to create a unique name
counter = 1
while finalName is None:
name = userName + str(counter).zfill(15)
fullName = prefix + name + suffix
if fullName.lower() not in existing:
finalName = fullName
break
else:
counter += 1
if counter >= 999999999999999:
break
# if there is a clash, go to the next fallback
if finalName is None:
finalName = handleClash2(existing, prefix, suffix)
# finished
return finalName
def handleClash2(existing=[], prefix="", suffix=""): def handleClash2(existing=[], prefix="", suffix=""):
""" """
existing should be a case-insensitive list existing should be a case-insensitive list
of all existing file names. of all existing file names.
>>> prefix = ("0" * 5) + "." >>> prefix = ("0" * 5) + "."
>>> suffix = "." + ("0" * 10) >>> suffix = "." + ("0" * 10)
>>> existing = [prefix + str(i) + suffix for i in range(100)] >>> existing = [prefix + str(i) + suffix for i in range(100)]
>>> e = list(existing) >>> e = list(existing)
>>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
... '00000.100.0000000000') ... '00000.100.0000000000')
True True
>>> e = list(existing) >>> e = list(existing)
>>> e.remove(prefix + "1" + suffix) >>> e.remove(prefix + "1" + suffix)
>>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
... '00000.1.0000000000') ... '00000.1.0000000000')
True True
>>> e = list(existing)
>>> e.remove(prefix + "2" + suffix)
>>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
... '00000.2.0000000000')
True
"""
# calculate the longest possible string
maxLength = maxFileNameLength - len(prefix) - len(suffix)
maxValue = int("9" * maxLength)
# try to find a number
finalName = None
counter = 1
while finalName is None:
fullName = prefix + str(counter) + suffix
if fullName.lower() not in existing:
finalName = fullName
break
else:
counter += 1
if counter >= maxValue:
break
# raise an error if nothing has been found
if finalName is None:
raise NameTranslationError("No unique name could be found.")
# finished
return finalName
>>> e = list(existing)
>>> e.remove(prefix + "2" + suffix)
>>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
... '00000.2.0000000000')
True
"""
# calculate the longest possible string
maxLength = maxFileNameLength - len(prefix) - len(suffix)
maxValue = int("9" * maxLength)
# try to find a number
finalName = None
counter = 1
while finalName is None:
fullName = prefix + str(counter) + suffix
if fullName.lower() not in existing:
finalName = fullName
break
else:
counter += 1
if counter >= maxValue:
break
# raise an error if nothing has been found
if finalName is None:
raise NameTranslationError("No unique name could be found.")
# finished
return finalName
if __name__ == "__main__": if __name__ == "__main__":
import doctest import doctest
import sys import sys
sys.exit(doctest.testmod().failed)
sys.exit(doctest.testmod().failed)
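The doctests moved around in this hunk all exercise one simple per-character rule: illegal characters become "_", and any character that is not already lower-case gets a trailing "_" so that names differing only by case cannot collide on case-insensitive file systems. A compressed sketch of just that step (the reserved-name and clash handling shown above are omitted, and the illegal set here is an illustrative subset):

def sketch_escape(name, illegal=frozenset('"*+/:<>?[\\]|')):
    out = []
    for ch in name:
        if ch in illegal:
            ch = "_"
        elif ch != ch.lower():       # mark upper-case characters with a trailing "_"
            ch += "_"
        out.append(ch)
    return "".join(out)

assert sketch_escape("Aacute_V.swash") == "A_acute_V_.swash"
assert sketch_escape("T_H") == "T__H_"
assert sketch_escape("t_h") == "t_h"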
View File
@ -23,16 +23,16 @@ import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
__all__ = [ __all__ = [
"MAX_F2DOT14", "MAX_F2DOT14",
"fixedToFloat", "fixedToFloat",
"floatToFixed", "floatToFixed",
"floatToFixedToFloat", "floatToFixedToFloat",
"floatToFixedToStr", "floatToFixedToStr",
"fixedToStr", "fixedToStr",
"strToFixed", "strToFixed",
"strToFixedToFloat", "strToFixedToFloat",
"ensureVersionIsLong", "ensureVersionIsLong",
"versionToFixed", "versionToFixed",
] ]
@ -40,212 +40,214 @@ MAX_F2DOT14 = 0x7FFF / (1 << 14)
def fixedToFloat(value, precisionBits): def fixedToFloat(value, precisionBits):
"""Converts a fixed-point number to a float given the number of """Converts a fixed-point number to a float given the number of
precision bits. precision bits.
Args: Args:
value (int): Number in fixed-point format. value (int): Number in fixed-point format.
precisionBits (int): Number of precision bits. precisionBits (int): Number of precision bits.
Returns: Returns:
Floating point value. Floating point value.
Examples:: Examples::
>>> import math >>> import math
>>> f = fixedToFloat(-10139, precisionBits=14) >>> f = fixedToFloat(-10139, precisionBits=14)
>>> math.isclose(f, -0.61883544921875) >>> math.isclose(f, -0.61883544921875)
True True
""" """
return value / (1 << precisionBits) return value / (1 << precisionBits)
def floatToFixed(value, precisionBits): def floatToFixed(value, precisionBits):
"""Converts a float to a fixed-point number given the number of """Converts a float to a fixed-point number given the number of
precision bits. precision bits.
Args: Args:
value (float): Floating point value. value (float): Floating point value.
precisionBits (int): Number of precision bits. precisionBits (int): Number of precision bits.
Returns: Returns:
int: Fixed-point representation. int: Fixed-point representation.
Examples:: Examples::
>>> floatToFixed(-0.61883544921875, precisionBits=14) >>> floatToFixed(-0.61883544921875, precisionBits=14)
-10139 -10139
>>> floatToFixed(-0.61884, precisionBits=14) >>> floatToFixed(-0.61884, precisionBits=14)
-10139 -10139
""" """
return otRound(value * (1 << precisionBits)) return otRound(value * (1 << precisionBits))
def floatToFixedToFloat(value, precisionBits): def floatToFixedToFloat(value, precisionBits):
"""Converts a float to a fixed-point number and back again. """Converts a float to a fixed-point number and back again.
By converting the float to fixed, rounding it, and converting it back By converting the float to fixed, rounding it, and converting it back
to float again, this returns a floating point value which is exactly to float again, this returns a floating point value which is exactly
representable in fixed-point format. representable in fixed-point format.
Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``. Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.
Args: Args:
value (float): The input floating point value. value (float): The input floating point value.
precisionBits (int): Number of precision bits. precisionBits (int): Number of precision bits.
Returns: Returns:
float: The transformed and rounded value. float: The transformed and rounded value.
Examples:: Examples::
>>> import math >>> import math
>>> f1 = -0.61884 >>> f1 = -0.61884
>>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14) >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
>>> f1 != f2 >>> f1 != f2
True True
>>> math.isclose(f2, -0.61883544921875) >>> math.isclose(f2, -0.61883544921875)
True True
""" """
scale = 1 << precisionBits scale = 1 << precisionBits
return otRound(value * scale) / scale return otRound(value * scale) / scale
def fixedToStr(value, precisionBits): def fixedToStr(value, precisionBits):
"""Converts a fixed-point number to a string representing a decimal float. """Converts a fixed-point number to a string representing a decimal float.
This chooses the float that has the shortest decimal representation (the least This chooses the float that has the shortest decimal representation (the least
number of fractional decimal digits). number of fractional decimal digits).
For example, to convert a fixed-point number in a 2.14 format, use For example, to convert a fixed-point number in a 2.14 format, use
``precisionBits=14``:: ``precisionBits=14``::
>>> fixedToStr(-10139, precisionBits=14) >>> fixedToStr(-10139, precisionBits=14)
'-0.61884' '-0.61884'
This is pretty slow compared to the simple division used in ``fixedToFloat``. This is pretty slow compared to the simple division used in ``fixedToFloat``.
Use sporadically when you need to serialize or print the fixed-point number in Use sporadically when you need to serialize or print the fixed-point number in
a human-readable form. a human-readable form.
It uses nearestMultipleShortestRepr under the hood. It uses nearestMultipleShortestRepr under the hood.
Args: Args:
value (int): The fixed-point value to convert. value (int): The fixed-point value to convert.
precisionBits (int): Number of precision bits, *up to a maximum of 16*. precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns: Returns:
str: A string representation of the value. str: A string representation of the value.
""" """
scale = 1 << precisionBits scale = 1 << precisionBits
return nearestMultipleShortestRepr(value/scale, factor=1.0/scale) return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale)
def strToFixed(string, precisionBits): def strToFixed(string, precisionBits):
"""Converts a string representing a decimal float to a fixed-point number. """Converts a string representing a decimal float to a fixed-point number.
Args: Args:
string (str): A string representing a decimal float. string (str): A string representing a decimal float.
precisionBits (int): Number of precision bits, *up to a maximum of 16*. precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns: Returns:
int: Fixed-point representation. int: Fixed-point representation.
Examples:: Examples::
>>> ## to convert a float string to a 2.14 fixed-point number: >>> ## to convert a float string to a 2.14 fixed-point number:
>>> strToFixed('-0.61884', precisionBits=14) >>> strToFixed('-0.61884', precisionBits=14)
-10139 -10139
""" """
value = float(string) value = float(string)
return otRound(value * (1 << precisionBits)) return otRound(value * (1 << precisionBits))
def strToFixedToFloat(string, precisionBits): def strToFixedToFloat(string, precisionBits):
"""Convert a string to a decimal float with fixed-point rounding. """Convert a string to a decimal float with fixed-point rounding.
This first converts string to a float, then turns it into a fixed-point This first converts string to a float, then turns it into a fixed-point
number with ``precisionBits`` fractional binary digits, then back to a number with ``precisionBits`` fractional binary digits, then back to a
float again. float again.
This is simply a shorthand for fixedToFloat(floatToFixed(float(s))). This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).
Args: Args:
string (str): A string representing a decimal float. string (str): A string representing a decimal float.
precisionBits (int): Number of precision bits. precisionBits (int): Number of precision bits.
Returns: Returns:
float: The transformed and rounded value. float: The transformed and rounded value.
Examples:: Examples::
>>> import math >>> import math
>>> s = '-0.61884' >>> s = '-0.61884'
>>> bits = 14 >>> bits = 14
>>> f = strToFixedToFloat(s, precisionBits=bits) >>> f = strToFixedToFloat(s, precisionBits=bits)
>>> math.isclose(f, -0.61883544921875) >>> math.isclose(f, -0.61883544921875)
True True
>>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits) >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
True True
""" """
value = float(string) value = float(string)
scale = 1 << precisionBits scale = 1 << precisionBits
return otRound(value * scale) / scale return otRound(value * scale) / scale
def floatToFixedToStr(value, precisionBits): def floatToFixedToStr(value, precisionBits):
"""Convert float to string with fixed-point rounding. """Convert float to string with fixed-point rounding.
This uses the shortest decimal representation (ie. the least This uses the shortest decimal representation (ie. the least
number of fractional decimal digits) to represent the equivalent number of fractional decimal digits) to represent the equivalent
fixed-point number with ``precisionBits`` fractional binary digits. fixed-point number with ``precisionBits`` fractional binary digits.
It uses nearestMultipleShortestRepr under the hood. It uses nearestMultipleShortestRepr under the hood.
>>> floatToFixedToStr(-0.61883544921875, precisionBits=14) >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
'-0.61884' '-0.61884'
Args: Args:
value (float): The float value to convert. value (float): The float value to convert.
precisionBits (int): Number of precision bits, *up to a maximum of 16*. precisionBits (int): Number of precision bits, *up to a maximum of 16*.
Returns: Returns:
str: A string representation of the value. str: A string representation of the value.
""" """
scale = 1 << precisionBits scale = 1 << precisionBits
return nearestMultipleShortestRepr(value, factor=1.0/scale) return nearestMultipleShortestRepr(value, factor=1.0 / scale)
def ensureVersionIsLong(value): def ensureVersionIsLong(value):
"""Ensure a table version is an unsigned long. """Ensure a table version is an unsigned long.
OpenType table version numbers are expressed as a single unsigned long OpenType table version numbers are expressed as a single unsigned long
comprising of an unsigned short major version and unsigned short minor comprising of an unsigned short major version and unsigned short minor
version. This function detects if the value to be used as a version number version. This function detects if the value to be used as a version number
looks too small (i.e. is less than ``0x10000``), and converts it to looks too small (i.e. is less than ``0x10000``), and converts it to
fixed-point using :func:`floatToFixed` if so. fixed-point using :func:`floatToFixed` if so.
Args: Args:
value (Number): a candidate table version number. value (Number): a candidate table version number.
Returns: Returns:
int: A table version number, possibly corrected to fixed-point. int: A table version number, possibly corrected to fixed-point.
""" """
if value < 0x10000: if value < 0x10000:
newValue = floatToFixed(value, 16) newValue = floatToFixed(value, 16)
log.warning( log.warning(
"Table version value is a float: %.4f; " "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
"fix to use hex instead: 0x%08x", value, newValue) value,
value = newValue newValue,
return value )
value = newValue
return value
def versionToFixed(value): def versionToFixed(value):
"""Ensure a table version number is fixed-point. """Ensure a table version number is fixed-point.
Args: Args:
value (str): a candidate table version number. value (str): a candidate table version number.
Returns: Returns:
int: A table version number, possibly corrected to fixed-point. int: A table version number, possibly corrected to fixed-point.
""" """
value = int(value, 0) if value.startswith("0") else float(value) value = int(value, 0) if value.startswith("0") else float(value)
value = ensureVersionIsLong(value) value = ensureVersionIsLong(value)
return value return value
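Everything in this module is built on the same scale factor, 1 << precisionBits; for the common 2.14 case that means multiplying or dividing by 16384 and rounding to the nearest integer. A quick worked sketch (using floor(x + 0.5) rounding, which agrees with fontTools' otRound for these values):

import math

def sketch_float_to_fixed(value, precision_bits=14):
    # Round to the nearest 1/2**precision_bits, ties toward +infinity.
    return math.floor(value * (1 << precision_bits) + 0.5)

def sketch_fixed_to_float(value, precision_bits=14):
    return value / (1 << precision_bits)

f = sketch_float_to_fixed(-0.61884)                # -10139, as in the doctests above
assert f == -10139
assert math.isclose(sketch_fixed_to_float(f), -0.61883544921875)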
View File
@ -13,524 +13,531 @@ TIME_LEVEL = logging.DEBUG
# per-level format strings used by the default formatter # per-level format strings used by the default formatter
# (the level name is not printed for INFO and DEBUG messages) # (the level name is not printed for INFO and DEBUG messages)
DEFAULT_FORMATS = { DEFAULT_FORMATS = {
"*": "%(levelname)s: %(message)s", "*": "%(levelname)s: %(message)s",
"INFO": "%(message)s", "INFO": "%(message)s",
"DEBUG": "%(message)s", "DEBUG": "%(message)s",
} }
class LevelFormatter(logging.Formatter): class LevelFormatter(logging.Formatter):
"""Log formatter with level-specific formatting. """Log formatter with level-specific formatting.
Formatter class which optionally takes a dict of logging levels to Formatter class which optionally takes a dict of logging levels to
format strings, allowing to customise the log records appearance for format strings, allowing to customise the log records appearance for
specific levels. specific levels.
Attributes: Attributes:
fmt: A dictionary mapping logging levels to format strings. fmt: A dictionary mapping logging levels to format strings.
The ``*`` key identifies the default format string. The ``*`` key identifies the default format string.
datefmt: As per py:class:`logging.Formatter` datefmt: As per py:class:`logging.Formatter`
style: As per py:class:`logging.Formatter` style: As per py:class:`logging.Formatter`
>>> import sys >>> import sys
>>> handler = logging.StreamHandler(sys.stdout) >>> handler = logging.StreamHandler(sys.stdout)
>>> formatter = LevelFormatter( >>> formatter = LevelFormatter(
... fmt={ ... fmt={
... '*': '[%(levelname)s] %(message)s', ... '*': '[%(levelname)s] %(message)s',
... 'DEBUG': '%(name)s [%(levelname)s] %(message)s', ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s',
... 'INFO': '%(message)s', ... 'INFO': '%(message)s',
... }) ... })
>>> handler.setFormatter(formatter) >>> handler.setFormatter(formatter)
>>> log = logging.getLogger('test') >>> log = logging.getLogger('test')
>>> log.setLevel(logging.DEBUG) >>> log.setLevel(logging.DEBUG)
>>> log.addHandler(handler) >>> log.addHandler(handler)
>>> log.debug('this uses a custom format string') >>> log.debug('this uses a custom format string')
test [DEBUG] this uses a custom format string test [DEBUG] this uses a custom format string
>>> log.info('this also uses a custom format string') >>> log.info('this also uses a custom format string')
this also uses a custom format string this also uses a custom format string
>>> log.warning("this one uses the default format string") >>> log.warning("this one uses the default format string")
[WARNING] this one uses the default format string [WARNING] this one uses the default format string
""" """
def __init__(self, fmt=None, datefmt=None, style="%"): def __init__(self, fmt=None, datefmt=None, style="%"):
if style != '%': if style != "%":
raise ValueError( raise ValueError(
"only '%' percent style is supported in both python 2 and 3") "only '%' percent style is supported in both python 2 and 3"
if fmt is None: )
fmt = DEFAULT_FORMATS if fmt is None:
if isinstance(fmt, str): fmt = DEFAULT_FORMATS
default_format = fmt if isinstance(fmt, str):
custom_formats = {} default_format = fmt
elif isinstance(fmt, Mapping): custom_formats = {}
custom_formats = dict(fmt) elif isinstance(fmt, Mapping):
default_format = custom_formats.pop("*", None) custom_formats = dict(fmt)
else: default_format = custom_formats.pop("*", None)
raise TypeError('fmt must be a str or a dict of str: %r' % fmt) else:
super(LevelFormatter, self).__init__(default_format, datefmt) raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
self.default_format = self._fmt super(LevelFormatter, self).__init__(default_format, datefmt)
self.custom_formats = {} self.default_format = self._fmt
for level, fmt in custom_formats.items(): self.custom_formats = {}
level = logging._checkLevel(level) for level, fmt in custom_formats.items():
self.custom_formats[level] = fmt level = logging._checkLevel(level)
self.custom_formats[level] = fmt
def format(self, record): def format(self, record):
if self.custom_formats: if self.custom_formats:
fmt = self.custom_formats.get(record.levelno, self.default_format) fmt = self.custom_formats.get(record.levelno, self.default_format)
if self._fmt != fmt: if self._fmt != fmt:
self._fmt = fmt self._fmt = fmt
# for python >= 3.2, _style needs to be set if _fmt changes # for python >= 3.2, _style needs to be set if _fmt changes
if PercentStyle: if PercentStyle:
self._style = PercentStyle(fmt) self._style = PercentStyle(fmt)
return super(LevelFormatter, self).format(record) return super(LevelFormatter, self).format(record)
def configLogger(**kwargs): def configLogger(**kwargs):
"""A more sophisticated logging system configuation manager. """A more sophisticated logging system configuation manager.
This is more or less the same as :py:func:`logging.basicConfig`, This is more or less the same as :py:func:`logging.basicConfig`,
with some additional options and defaults. with some additional options and defaults.
The default behaviour is to create a ``StreamHandler`` which writes to The default behaviour is to create a ``StreamHandler`` which writes to
sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
the handler to the top-level library logger ("fontTools"). the handler to the top-level library logger ("fontTools").
A number of optional keyword arguments may be specified, which can alter A number of optional keyword arguments may be specified, which can alter
the default behaviour. the default behaviour.
Args: Args:
logger: Specifies the logger name or a Logger instance to be logger: Specifies the logger name or a Logger instance to be
configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``, configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
this function can be called multiple times to reconfigure a logger. this function can be called multiple times to reconfigure a logger.
If the logger or any of its children already exists before the call is If the logger or any of its children already exists before the call is
made, they will be reset before the new configuration is applied. made, they will be reset before the new configuration is applied.
filename: Specifies that a ``FileHandler`` be created, using the filename: Specifies that a ``FileHandler`` be created, using the
specified filename, rather than a ``StreamHandler``. specified filename, rather than a ``StreamHandler``.
filemode: Specifies the mode to open the file, if filename is filemode: Specifies the mode to open the file, if filename is
specified. (If filemode is unspecified, it defaults to ``a``). specified. (If filemode is unspecified, it defaults to ``a``).
format: Use the specified format string for the handler. This format: Use the specified format string for the handler. This
argument also accepts a dictionary of format strings keyed by argument also accepts a dictionary of format strings keyed by
level name, to allow customising the records appearance for level name, to allow customising the records appearance for
specific levels. The special ``'*'`` key is for 'any other' level. specific levels. The special ``'*'`` key is for 'any other' level.
datefmt: Use the specified date/time format. datefmt: Use the specified date/time format.
level: Set the logger level to the specified level. level: Set the logger level to the specified level.
stream: Use the specified stream to initialize the StreamHandler. Note stream: Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with ``filename`` - if both that this argument is incompatible with ``filename`` - if both
are present, ``stream`` is ignored. are present, ``stream`` is ignored.
handlers: If specified, this should be an iterable of already created handlers: If specified, this should be an iterable of already created
handlers, which will be added to the logger. Any handler in the handlers, which will be added to the logger. Any handler in the
list which does not have a formatter assigned will be assigned the list which does not have a formatter assigned will be assigned the
formatter created in this function. formatter created in this function.
filters: If specified, this should be an iterable of already created filters: If specified, this should be an iterable of already created
filters. If the ``handlers`` do not already have filters assigned, filters. If the ``handlers`` do not already have filters assigned,
these filters will be added to them. these filters will be added to them.
propagate: All loggers have a ``propagate`` attribute which determines propagate: All loggers have a ``propagate`` attribute which determines
whether to continue searching for handlers up the logging hierarchy. whether to continue searching for handlers up the logging hierarchy.
If not provided, the "propagate" attribute will be set to ``False``. If not provided, the "propagate" attribute will be set to ``False``.
""" """
# using kwargs to enforce keyword-only arguments in py2. # using kwargs to enforce keyword-only arguments in py2.
handlers = kwargs.pop("handlers", None) handlers = kwargs.pop("handlers", None)
if handlers is None: if handlers is None:
if "stream" in kwargs and "filename" in kwargs: if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be " raise ValueError(
"specified together") "'stream' and 'filename' should not be " "specified together"
else: )
if "stream" in kwargs or "filename" in kwargs: else:
raise ValueError("'stream' or 'filename' should not be " if "stream" in kwargs or "filename" in kwargs:
"specified together with 'handlers'") raise ValueError(
if handlers is None: "'stream' or 'filename' should not be "
filename = kwargs.pop("filename", None) "specified together with 'handlers'"
mode = kwargs.pop("filemode", 'a') )
if filename: if handlers is None:
h = logging.FileHandler(filename, mode) filename = kwargs.pop("filename", None)
else: mode = kwargs.pop("filemode", "a")
stream = kwargs.pop("stream", None) if filename:
h = logging.StreamHandler(stream) h = logging.FileHandler(filename, mode)
handlers = [h] else:
# By default, the top-level library logger is configured. stream = kwargs.pop("stream", None)
logger = kwargs.pop("logger", "fontTools") h = logging.StreamHandler(stream)
if not logger or isinstance(logger, str): handlers = [h]
# empty "" or None means the 'root' logger # By default, the top-level library logger is configured.
logger = logging.getLogger(logger) logger = kwargs.pop("logger", "fontTools")
# before (re)configuring, reset named logger and its children (if exist) if not logger or isinstance(logger, str):
_resetExistingLoggers(parent=logger.name) # empty "" or None means the 'root' logger
# use DEFAULT_FORMATS if 'format' is None logger = logging.getLogger(logger)
fs = kwargs.pop("format", None) # before (re)configuring, reset named logger and its children (if exist)
dfs = kwargs.pop("datefmt", None) _resetExistingLoggers(parent=logger.name)
# XXX: '%' is the only format style supported on both py2 and 3 # use DEFAULT_FORMATS if 'format' is None
style = kwargs.pop("style", '%') fs = kwargs.pop("format", None)
fmt = LevelFormatter(fs, dfs, style) dfs = kwargs.pop("datefmt", None)
filters = kwargs.pop("filters", []) # XXX: '%' is the only format style supported on both py2 and 3
for h in handlers: style = kwargs.pop("style", "%")
if h.formatter is None: fmt = LevelFormatter(fs, dfs, style)
h.setFormatter(fmt) filters = kwargs.pop("filters", [])
if not h.filters: for h in handlers:
for f in filters: if h.formatter is None:
h.addFilter(f) h.setFormatter(fmt)
logger.addHandler(h) if not h.filters:
if logger.name != "root": for f in filters:
# stop searching up the hierarchy for handlers h.addFilter(f)
logger.propagate = kwargs.pop("propagate", False) logger.addHandler(h)
# set a custom severity level if logger.name != "root":
level = kwargs.pop("level", None) # stop searching up the hierarchy for handlers
if level is not None: logger.propagate = kwargs.pop("propagate", False)
logger.setLevel(level) # set a custom severity level
if kwargs: level = kwargs.pop("level", None)
keys = ', '.join(kwargs.keys()) if level is not None:
raise ValueError('Unrecognised argument(s): %s' % keys) logger.setLevel(level)
if kwargs:
keys = ", ".join(kwargs.keys())
raise ValueError("Unrecognised argument(s): %s" % keys)
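Since this hunk is dense, it is worth restating the intended call pattern, which the reformatting does not change: configLogger is basicConfig-like and targets the "fontTools" logger by default. A minimal usage sketch:

import logging
from fontTools.misc.loggingTools import configLogger

# One-time setup: a stderr StreamHandler, the DEFAULT_FORMATS formatter,
# and level INFO on the "fontTools" logger.
configLogger(level="INFO")
logging.getLogger("fontTools.subset").info("this goes through the configured handler")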
def _resetExistingLoggers(parent="root"): def _resetExistingLoggers(parent="root"):
""" Reset the logger named 'parent' and all its children to their initial """Reset the logger named 'parent' and all its children to their initial
state, if they already exist in the current configuration. state, if they already exist in the current configuration.
""" """
root = logging.root root = logging.root
# get sorted list of all existing loggers # get sorted list of all existing loggers
existing = sorted(root.manager.loggerDict.keys()) existing = sorted(root.manager.loggerDict.keys())
if parent == "root": if parent == "root":
# all the existing loggers are children of 'root' # all the existing loggers are children of 'root'
loggers_to_reset = [parent] + existing loggers_to_reset = [parent] + existing
elif parent not in existing: elif parent not in existing:
# nothing to do # nothing to do
return return
elif parent in existing: elif parent in existing:
loggers_to_reset = [parent] loggers_to_reset = [parent]
# collect children, starting with the entry after parent name # collect children, starting with the entry after parent name
i = existing.index(parent) + 1 i = existing.index(parent) + 1
prefixed = parent + "." prefixed = parent + "."
pflen = len(prefixed) pflen = len(prefixed)
num_existing = len(existing) num_existing = len(existing)
while i < num_existing: while i < num_existing:
if existing[i][:pflen] == prefixed: if existing[i][:pflen] == prefixed:
loggers_to_reset.append(existing[i]) loggers_to_reset.append(existing[i])
i += 1 i += 1
for name in loggers_to_reset: for name in loggers_to_reset:
if name == "root": if name == "root":
root.setLevel(logging.WARNING) root.setLevel(logging.WARNING)
for h in root.handlers[:]: for h in root.handlers[:]:
root.removeHandler(h) root.removeHandler(h)
for f in root.filters[:]: for f in root.filters[:]:
root.removeFilters(f) root.removeFilters(f)
root.disabled = False root.disabled = False
else: else:
logger = root.manager.loggerDict[name] logger = root.manager.loggerDict[name]
logger.level = logging.NOTSET logger.level = logging.NOTSET
logger.handlers = [] logger.handlers = []
logger.filters = [] logger.filters = []
logger.propagate = True logger.propagate = True
logger.disabled = False logger.disabled = False
class Timer(object): class Timer(object):
""" Keeps track of overall time and split/lap times. """Keeps track of overall time and split/lap times.
>>> import time >>> import time
>>> timer = Timer() >>> timer = Timer()
>>> time.sleep(0.01) >>> time.sleep(0.01)
>>> print("First lap:", timer.split()) >>> print("First lap:", timer.split())
First lap: ... First lap: ...
>>> time.sleep(0.02) >>> time.sleep(0.02)
>>> print("Second lap:", timer.split()) >>> print("Second lap:", timer.split())
Second lap: ... Second lap: ...
>>> print("Overall time:", timer.time()) >>> print("Overall time:", timer.time())
Overall time: ... Overall time: ...
Can be used as a context manager inside with-statements. Can be used as a context manager inside with-statements.
>>> with Timer() as t: >>> with Timer() as t:
... time.sleep(0.01) ... time.sleep(0.01)
>>> print("%0.3f seconds" % t.elapsed) >>> print("%0.3f seconds" % t.elapsed)
0... seconds 0... seconds
If initialised with a logger, it can log the elapsed time automatically If initialised with a logger, it can log the elapsed time automatically
upon exiting the with-statement. upon exiting the with-statement.
>>> import logging >>> import logging
>>> log = logging.getLogger("my-fancy-timer-logger") >>> log = logging.getLogger("my-fancy-timer-logger")
>>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout) >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
>>> with Timer(log, 'do something'): >>> with Timer(log, 'do something'):
... time.sleep(0.01) ... time.sleep(0.01)
Took ... to do something Took ... to do something
The same Timer instance, holding a reference to a logger, can be reused The same Timer instance, holding a reference to a logger, can be reused
in multiple with-statements, optionally with different messages or levels. in multiple with-statements, optionally with different messages or levels.
>>> timer = Timer(log) >>> timer = Timer(log)
>>> with timer(): >>> with timer():
... time.sleep(0.01) ... time.sleep(0.01)
elapsed time: ...s elapsed time: ...s
>>> with timer('redo it', level=logging.INFO): >>> with timer('redo it', level=logging.INFO):
... time.sleep(0.02) ... time.sleep(0.02)
Took ... to redo it Took ... to redo it
It can also be used as a function decorator to log the time elapsed to run It can also be used as a function decorator to log the time elapsed to run
the decorated function. the decorated function.
>>> @timer() >>> @timer()
... def test1(): ... def test1():
... time.sleep(0.01) ... time.sleep(0.01)
>>> @timer('run test 2', level=logging.INFO) >>> @timer('run test 2', level=logging.INFO)
... def test2(): ... def test2():
... time.sleep(0.02) ... time.sleep(0.02)
>>> test1() >>> test1()
Took ... to run 'test1' Took ... to run 'test1'
>>> test2() >>> test2()
Took ... to run test 2 Took ... to run test 2
""" """
# timeit.default_timer chooses the most accurate clock for each platform # timeit.default_timer chooses the most accurate clock for each platform
_time = timeit.default_timer _time = timeit.default_timer
default_msg = "elapsed time: %(time).3fs" default_msg = "elapsed time: %(time).3fs"
default_format = "Took %(time).3fs to %(msg)s" default_format = "Took %(time).3fs to %(msg)s"
def __init__(self, logger=None, msg=None, level=None, start=None): def __init__(self, logger=None, msg=None, level=None, start=None):
self.reset(start) self.reset(start)
if logger is None: if logger is None:
for arg in ('msg', 'level'): for arg in ("msg", "level"):
if locals().get(arg) is not None: if locals().get(arg) is not None:
raise ValueError( raise ValueError("'%s' can't be specified without a 'logger'" % arg)
"'%s' can't be specified without a 'logger'" % arg) self.logger = logger
self.logger = logger self.level = level if level is not None else TIME_LEVEL
self.level = level if level is not None else TIME_LEVEL self.msg = msg
self.msg = msg
def reset(self, start=None): def reset(self, start=None):
""" Reset timer to 'start_time' or the current time. """ """Reset timer to 'start_time' or the current time."""
if start is None: if start is None:
self.start = self._time() self.start = self._time()
else: else:
self.start = start self.start = start
self.last = self.start self.last = self.start
self.elapsed = 0.0 self.elapsed = 0.0
def time(self): def time(self):
""" Return the overall time (in seconds) since the timer started. """ """Return the overall time (in seconds) since the timer started."""
return self._time() - self.start return self._time() - self.start
def split(self): def split(self):
""" Split and return the lap time (in seconds) in between splits. """ """Split and return the lap time (in seconds) in between splits."""
current = self._time() current = self._time()
self.elapsed = current - self.last self.elapsed = current - self.last
self.last = current self.last = current
return self.elapsed return self.elapsed
def formatTime(self, msg, time): def formatTime(self, msg, time):
""" Format 'time' value in 'msg' and return formatted string. """Format 'time' value in 'msg' and return formatted string.
If 'msg' contains a '%(time)' format string, try to use that. If 'msg' contains a '%(time)' format string, try to use that.
Otherwise, use the predefined 'default_format'. Otherwise, use the predefined 'default_format'.
If 'msg' is empty or None, fall back to 'default_msg'. If 'msg' is empty or None, fall back to 'default_msg'.
""" """
if not msg: if not msg:
msg = self.default_msg msg = self.default_msg
if msg.find("%(time)") < 0: if msg.find("%(time)") < 0:
msg = self.default_format % {"msg": msg, "time": time} msg = self.default_format % {"msg": msg, "time": time}
else: else:
try: try:
msg = msg % {"time": time} msg = msg % {"time": time}
except (KeyError, ValueError): except (KeyError, ValueError):
pass # skip if the format string is malformed pass # skip if the format string is malformed
return msg return msg
def __enter__(self): def __enter__(self):
""" Start a new lap """ """Start a new lap"""
self.last = self._time() self.last = self._time()
self.elapsed = 0.0 self.elapsed = 0.0
return self return self
def __exit__(self, exc_type, exc_value, traceback): def __exit__(self, exc_type, exc_value, traceback):
""" End the current lap. If timer has a logger, log the time elapsed, """End the current lap. If timer has a logger, log the time elapsed,
using the format string in self.msg (or the default one). using the format string in self.msg (or the default one).
""" """
time = self.split() time = self.split()
if self.logger is None or exc_type: if self.logger is None or exc_type:
# if there's no logger attached, or if any exception occurred in # if there's no logger attached, or if any exception occurred in
# the with-statement, exit without logging the time # the with-statement, exit without logging the time
return return
message = self.formatTime(self.msg, time) message = self.formatTime(self.msg, time)
# Allow log handlers to see the individual parts to facilitate things # Allow log handlers to see the individual parts to facilitate things
# like a server accumulating aggregate stats. # like a server accumulating aggregate stats.
msg_parts = { 'msg': self.msg, 'time': time } msg_parts = {"msg": self.msg, "time": time}
self.logger.log(self.level, message, msg_parts) self.logger.log(self.level, message, msg_parts)
def __call__(self, func_or_msg=None, **kwargs): def __call__(self, func_or_msg=None, **kwargs):
""" If the first argument is a function, return a decorator which runs """If the first argument is a function, return a decorator which runs
the wrapped function inside Timer's context manager. the wrapped function inside Timer's context manager.
Otherwise, treat the first argument as a 'msg' string and return an updated Otherwise, treat the first argument as a 'msg' string and return an updated
Timer instance, referencing the same logger. Timer instance, referencing the same logger.
A 'level' keyword can also be passed to override self.level. A 'level' keyword can also be passed to override self.level.
""" """
if isinstance(func_or_msg, Callable): if isinstance(func_or_msg, Callable):
func = func_or_msg func = func_or_msg
# use the function name when no explicit 'msg' is provided # use the function name when no explicit 'msg' is provided
if not self.msg: if not self.msg:
self.msg = "run '%s'" % func.__name__ self.msg = "run '%s'" % func.__name__
@wraps(func) @wraps(func)
def wrapper(*args, **kwds): def wrapper(*args, **kwds):
with self: with self:
return func(*args, **kwds) return func(*args, **kwds)
return wrapper
else:
msg = func_or_msg or kwargs.get("msg")
level = kwargs.get("level", self.level)
return self.__class__(self.logger, msg, level)
def __float__(self): return wrapper
return self.elapsed else:
msg = func_or_msg or kwargs.get("msg")
level = kwargs.get("level", self.level)
return self.__class__(self.logger, msg, level)
def __int__(self): def __float__(self):
return int(self.elapsed) return self.elapsed
def __str__(self): def __int__(self):
return "%.3f" % self.elapsed return int(self.elapsed)
def __str__(self):
return "%.3f" % self.elapsed
class ChannelsFilter(logging.Filter): class ChannelsFilter(logging.Filter):
"""Provides a hierarchical filter for log entries based on channel names. """Provides a hierarchical filter for log entries based on channel names.
Filters out records emitted from a list of enabled channel names, Filters out records emitted from a list of enabled channel names,
including their children. It works the same as the ``logging.Filter`` including their children. It works the same as the ``logging.Filter``
class, but allows the user to specify multiple channel names. class, but allows the user to specify multiple channel names.
>>> import sys >>> import sys
>>> handler = logging.StreamHandler(sys.stdout) >>> handler = logging.StreamHandler(sys.stdout)
>>> handler.setFormatter(logging.Formatter("%(message)s")) >>> handler.setFormatter(logging.Formatter("%(message)s"))
>>> filter = ChannelsFilter("A.B", "C.D") >>> filter = ChannelsFilter("A.B", "C.D")
>>> handler.addFilter(filter) >>> handler.addFilter(filter)
>>> root = logging.getLogger() >>> root = logging.getLogger()
>>> root.addHandler(handler) >>> root.addHandler(handler)
>>> root.setLevel(level=logging.DEBUG) >>> root.setLevel(level=logging.DEBUG)
>>> logging.getLogger('A.B').debug('this record passes through') >>> logging.getLogger('A.B').debug('this record passes through')
this record passes through this record passes through
>>> logging.getLogger('A.B.C').debug('records from children also pass') >>> logging.getLogger('A.B.C').debug('records from children also pass')
records from children also pass records from children also pass
>>> logging.getLogger('C.D').debug('this one as well') >>> logging.getLogger('C.D').debug('this one as well')
this one as well this one as well
>>> logging.getLogger('A.B.').debug('also this one') >>> logging.getLogger('A.B.').debug('also this one')
also this one also this one
>>> logging.getLogger('A.F').debug('but this one does not!') >>> logging.getLogger('A.F').debug('but this one does not!')
>>> logging.getLogger('C.DE').debug('neither this one!') >>> logging.getLogger('C.DE').debug('neither this one!')
""" """
def __init__(self, *names): def __init__(self, *names):
self.names = names self.names = names
self.num = len(names) self.num = len(names)
self.lengths = {n: len(n) for n in names} self.lengths = {n: len(n) for n in names}
def filter(self, record): def filter(self, record):
if self.num == 0: if self.num == 0:
return True return True
for name in self.names: for name in self.names:
nlen = self.lengths[name] nlen = self.lengths[name]
if name == record.name: if name == record.name:
return True return True
elif (record.name.find(name, 0, nlen) == 0 elif record.name.find(name, 0, nlen) == 0 and record.name[nlen] == ".":
and record.name[nlen] == "."): return True
return True return False
return False
class CapturingLogHandler(logging.Handler): class CapturingLogHandler(logging.Handler):
def __init__(self, logger, level): def __init__(self, logger, level):
super(CapturingLogHandler, self).__init__(level=level) super(CapturingLogHandler, self).__init__(level=level)
self.records = [] self.records = []
if isinstance(logger, str): if isinstance(logger, str):
self.logger = logging.getLogger(logger) self.logger = logging.getLogger(logger)
else: else:
self.logger = logger self.logger = logger
def __enter__(self): def __enter__(self):
self.original_disabled = self.logger.disabled self.original_disabled = self.logger.disabled
self.original_level = self.logger.level self.original_level = self.logger.level
self.original_propagate = self.logger.propagate self.original_propagate = self.logger.propagate
self.logger.addHandler(self) self.logger.addHandler(self)
self.logger.setLevel(self.level) self.logger.setLevel(self.level)
self.logger.disabled = False self.logger.disabled = False
self.logger.propagate = False self.logger.propagate = False
return self return self
def __exit__(self, type, value, traceback): def __exit__(self, type, value, traceback):
self.logger.removeHandler(self) self.logger.removeHandler(self)
self.logger.setLevel(self.original_level) self.logger.setLevel(self.original_level)
self.logger.disabled = self.original_disabled self.logger.disabled = self.original_disabled
self.logger.propagate = self.original_propagate self.logger.propagate = self.original_propagate
return self return self
def emit(self, record): def emit(self, record):
self.records.append(record) self.records.append(record)
def assertRegex(self, regexp, msg=None): def assertRegex(self, regexp, msg=None):
import re import re
pattern = re.compile(regexp)
for r in self.records: pattern = re.compile(regexp)
if pattern.search(r.getMessage()): for r in self.records:
return True if pattern.search(r.getMessage()):
if msg is None: return True
msg = "Pattern '%s' not found in logger records" % regexp if msg is None:
assert 0, msg msg = "Pattern '%s' not found in logger records" % regexp
assert 0, msg
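A hypothetical test-style sketch (logger name and message are made up) of CapturingLogHandler, which temporarily attaches itself to a logger and records everything emitted inside the with-block:

import logging
from fontTools.misc.loggingTools import CapturingLogHandler

log = logging.getLogger("fontTools.test")

with CapturingLogHandler(log, level=logging.WARNING) as captor:
    log.warning("glyph 'A' has no outline")

assert len(captor.records) == 1
captor.assertRegex("no outline")  # passes if any captured message matches the pattern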
class LogMixin(object): class LogMixin(object):
""" Mixin class that adds logging functionality to another class. """Mixin class that adds logging functionality to another class.
You can define a new class that subclasses from ``LogMixin`` as well as You can define a new class that subclasses from ``LogMixin`` as well as
other base classes through multiple inheritance. other base classes through multiple inheritance.
All instances of that class will have a ``log`` property that returns All instances of that class will have a ``log`` property that returns
a ``logging.Logger`` named after their respective ``<module>.<class>``. a ``logging.Logger`` named after their respective ``<module>.<class>``.
For example: For example:
>>> class BaseClass(object): >>> class BaseClass(object):
... pass ... pass
>>> class MyClass(LogMixin, BaseClass): >>> class MyClass(LogMixin, BaseClass):
... pass ... pass
>>> a = MyClass() >>> a = MyClass()
>>> isinstance(a.log, logging.Logger) >>> isinstance(a.log, logging.Logger)
True True
>>> print(a.log.name) >>> print(a.log.name)
fontTools.misc.loggingTools.MyClass fontTools.misc.loggingTools.MyClass
>>> class AnotherClass(MyClass): >>> class AnotherClass(MyClass):
... pass ... pass
>>> b = AnotherClass() >>> b = AnotherClass()
>>> isinstance(b.log, logging.Logger) >>> isinstance(b.log, logging.Logger)
True True
>>> print(b.log.name) >>> print(b.log.name)
fontTools.misc.loggingTools.AnotherClass fontTools.misc.loggingTools.AnotherClass
""" """
@property @property
def log(self): def log(self):
if not hasattr(self, "_log"): if not hasattr(self, "_log"):
name = ".".join( name = ".".join((self.__class__.__module__, self.__class__.__name__))
(self.__class__.__module__, self.__class__.__name__) self._log = logging.getLogger(name)
) return self._log
self._log = logging.getLogger(name)
return self._log
def deprecateArgument(name, msg, category=UserWarning): def deprecateArgument(name, msg, category=UserWarning):
""" Raise a warning about deprecated function argument 'name'. """ """Raise a warning about deprecated function argument 'name'."""
warnings.warn( warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
"%r is deprecated; %s" % (name, msg), category=category, stacklevel=3)
def deprecateFunction(msg, category=UserWarning): def deprecateFunction(msg, category=UserWarning):
""" Decorator to raise a warning when a deprecated function is called. """ """Decorator to raise a warning when a deprecated function is called."""
def decorator(func):
@wraps(func) def decorator(func):
def wrapper(*args, **kwargs): @wraps(func)
warnings.warn( def wrapper(*args, **kwargs):
"%r is deprecated; %s" % (func.__name__, msg), warnings.warn(
category=category, stacklevel=2) "%r is deprecated; %s" % (func.__name__, msg),
return func(*args, **kwargs) category=category,
return wrapper stacklevel=2,
return decorator )
return func(*args, **kwargs)
return wrapper
return decorator
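A small sketch (the function and message are invented) of the deprecateFunction decorator defined above:

from fontTools.misc.loggingTools import deprecateFunction

@deprecateFunction("use 'newHelper' instead", category=DeprecationWarning)
def oldHelper():
    return 42

oldHelper()  # warns: 'oldHelper' is deprecated; use 'newHelper' instead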
if __name__ == "__main__": if __name__ == "__main__":
import doctest import doctest
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)

@@ -1,54 +1,56 @@
from fontTools.misc.textTools import Tag, bytesjoin, strjoin from fontTools.misc.textTools import Tag, bytesjoin, strjoin
try: try:
import xattr import xattr
except ImportError: except ImportError:
xattr = None xattr = None
def _reverseString(s): def _reverseString(s):
s = list(s) s = list(s)
s.reverse() s.reverse()
return strjoin(s) return strjoin(s)
def getMacCreatorAndType(path): def getMacCreatorAndType(path):
"""Returns file creator and file type codes for a path. """Returns file creator and file type codes for a path.
Args: Args:
path (str): A file path. path (str): A file path.
Returns: Returns:
A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
representing the file creator and the second representing the representing the file creator and the second representing the
file type. file type.
""" """
if xattr is not None: if xattr is not None:
try: try:
finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo') finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
except (KeyError, IOError): except (KeyError, IOError):
pass pass
else: else:
fileType = Tag(finderInfo[:4]) fileType = Tag(finderInfo[:4])
fileCreator = Tag(finderInfo[4:8]) fileCreator = Tag(finderInfo[4:8])
return fileCreator, fileType return fileCreator, fileType
return None, None return None, None
def setMacCreatorAndType(path, fileCreator, fileType): def setMacCreatorAndType(path, fileCreator, fileType):
"""Set file creator and file type codes for a path. """Set file creator and file type codes for a path.
Note that if the ``xattr`` module is not installed, no action is Note that if the ``xattr`` module is not installed, no action is
taken but no error is raised. taken but no error is raised.
Args: Args:
path (str): A file path. path (str): A file path.
fileCreator: A four-character file creator tag. fileCreator: A four-character file creator tag.
fileType: A four-character file type tag. fileType: A four-character file type tag.
""" """
if xattr is not None: if xattr is not None:
from fontTools.misc.textTools import pad from fontTools.misc.textTools import pad
if not all(len(s) == 4 for s in (fileCreator, fileType)):
raise TypeError('arg must be string of 4 chars') if not all(len(s) == 4 for s in (fileCreator, fileType)):
finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) raise TypeError("arg must be string of 4 chars")
xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo) finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)
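An illustrative sketch (the path and the four-character codes are assumptions) of the two helpers above; on platforms without the xattr module both calls degrade gracefully:

from fontTools.misc.macCreatorType import getMacCreatorAndType, setMacCreatorAndType

setMacCreatorAndType("MyFont.suit", b"DMOV", b"FFIL")    # silently does nothing without xattr
creator, fileType = getMacCreatorAndType("MyFont.suit")  # (None, None) without xattr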
@@ -7,216 +7,218 @@ from collections.abc import MutableMapping
class ResourceError(Exception): class ResourceError(Exception):
pass pass
class ResourceReader(MutableMapping): class ResourceReader(MutableMapping):
"""Reader for Mac OS resource forks. """Reader for Mac OS resource forks.
Parses a resource fork and returns resources according to their type. Parses a resource fork and returns resources according to their type.
If run on OS X, this will open the resource fork in the filesystem. If run on OS X, this will open the resource fork in the filesystem.
Otherwise, it will open the file itself and attempt to read it as Otherwise, it will open the file itself and attempt to read it as
though it were a resource fork. though it were a resource fork.
The returned object can be indexed by type and iterated over, The returned object can be indexed by type and iterated over,
returning in each case a list of py:class:`Resource` objects returning in each case a list of py:class:`Resource` objects
representing all the resources of a certain type. representing all the resources of a certain type.
""" """
def __init__(self, fileOrPath):
"""Open a file
Args: def __init__(self, fileOrPath):
fileOrPath: Either an object supporting a ``read`` method, an """Open a file
``os.PathLike`` object, or a string.
"""
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod Args:
def openResourceFork(path): fileOrPath: Either an object supporting a ``read`` method, an
if hasattr(path, "__fspath__"): # support os.PathLike objects ``os.PathLike`` object, or a string.
path = path.__fspath__() """
with open(path + '/..namedfork/rsrc', 'rb') as resfork: self._resources = OrderedDict()
data = resfork.read() if hasattr(fileOrPath, "read"):
infile = BytesIO(data) self.file = fileOrPath
infile.name = path else:
return infile try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod @staticmethod
def openDataFork(path): def openResourceFork(path):
with open(path, 'rb') as datafork: if hasattr(path, "__fspath__"): # support os.PathLike objects
data = datafork.read() path = path.__fspath__()
infile = BytesIO(data) with open(path + "/..namedfork/rsrc", "rb") as resfork:
infile.name = path data = resfork.read()
return infile infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self): @staticmethod
self._readHeaderAndMap() def openDataFork(path):
self._readTypeList() with open(path, "rb") as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _read(self, numBytes, offset=None): def _readFile(self):
if offset is not None: self._readHeaderAndMap()
try: self._readTypeList()
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self): def _read(self, numBytes, offset=None):
self.file.seek(0) if offset is not None:
headerData = self._read(ResourceForkHeaderSize) try:
sstruct.unpack(ResourceForkHeader, headerData, self) self.file.seek(offset)
# seek to resource map, skip reserved except OverflowError:
mapOffset = self.mapOffset + 22 raise ResourceError("Failed to seek offset ('offset' is too large)")
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset) if self.file.tell() != offset:
sstruct.unpack(ResourceMapHeader, resourceMapData, self) raise ResourceError("Failed to seek offset (reached EOF)")
self.absTypeListOffset = self.mapOffset + self.typeListOffset try:
self.absNameListOffset = self.mapOffset + self.nameListOffset data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError("Cannot read resource (not enough data)")
return data
def _readTypeList(self): def _readHeaderAndMap(self):
absTypeListOffset = self.absTypeListOffset self.file.seek(0)
numTypesData = self._read(2, absTypeListOffset) headerData = self._read(ResourceForkHeaderSize)
self.numTypes, = struct.unpack('>H', numTypesData) sstruct.unpack(ResourceForkHeader, headerData, self)
absTypeListOffset2 = absTypeListOffset + 2 # seek to resource map, skip reserved
for i in range(self.numTypes + 1): mapOffset = self.mapOffset + 22
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset) sstruct.unpack(ResourceMapHeader, resourceMapData, self)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData) self.absTypeListOffset = self.mapOffset + self.typeListOffset
resType = tostr(item['type'], encoding='mac-roman') self.absNameListOffset = self.mapOffset + self.nameListOffset
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes): def _readTypeList(self):
resources = [] absTypeListOffset = self.absTypeListOffset
for i in range(numRes): numTypesData = self._read(2, absTypeListOffset)
refOffset = refListOffset + ResourceRefItemSize * i (self.numTypes,) = struct.unpack(">H", numTypesData)
refData = self._read(ResourceRefItemSize, refOffset) absTypeListOffset2 = absTypeListOffset + 2
res = Resource(resType) for i in range(self.numTypes + 1):
res.decompile(refData, self) resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resources.append(res) resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
return resources item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item["type"], encoding="mac-roman")
refListOffset = absTypeListOffset + item["refListOffset"]
numRes = item["numRes"] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def __getitem__(self, resType): def _readReferenceList(self, resType, refListOffset, numRes):
return self._resources[resType] resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __delitem__(self, resType): def __getitem__(self, resType):
del self._resources[resType] return self._resources[resType]
def __setitem__(self, resType, resources): def __delitem__(self, resType):
self._resources[resType] = resources del self._resources[resType]
def __len__(self): def __setitem__(self, resType, resources):
return len(self._resources) self._resources[resType] = resources
def __iter__(self): def __len__(self):
return iter(self._resources) return len(self._resources)
def keys(self): def __iter__(self):
return self._resources.keys() return iter(self._resources)
@property def keys(self):
def types(self): return self._resources.keys()
"""A list of the types of resources in the resource fork."""
return list(self._resources.keys())
def countResources(self, resType): @property
"""Return the number of resources of a given type.""" def types(self):
try: """A list of the types of resources in the resource fork."""
return len(self[resType]) return list(self._resources.keys())
except KeyError:
return 0
def getIndices(self, resType): def countResources(self, resType):
"""Returns a list of indices of resources of a given type.""" """Return the number of resources of a given type."""
numRes = self.countResources(resType) try:
if numRes: return len(self[resType])
return list(range(1, numRes+1)) except KeyError:
else: return 0
return []
def getNames(self, resType): def getIndices(self, resType):
"""Return list of names of all resources of a given type.""" """Returns a list of indices of resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None] numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes + 1))
else:
return []
def getIndResource(self, resType, index): def getNames(self, resType):
"""Return resource of given type located at an index ranging from 1 """Return list of names of all resources of a given type."""
to the number of resources for that type, or None if not found. return [res.name for res in self.get(resType, []) if res.name is not None]
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name): def getIndResource(self, resType, index):
"""Return the named resource of given type, else return None.""" """Return resource of given type located at an index ranging from 1
name = tostr(name, encoding='mac-roman') to the number of resources for that type, or None if not found.
for res in self.get(resType, []): """
if res.name == name: if index < 1:
return res return None
return None try:
res = self[resType][index - 1]
except (KeyError, IndexError):
return None
return res
def close(self): def getNamedResource(self, resType, name):
if not self.file.closed: """Return the named resource of given type, else return None."""
self.file.close() name = tostr(name, encoding="mac-roman")
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
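A hypothetical sketch (the file name is illustrative) of reading 'sfnt' resources out of a Mac font suitcase with the mapping interface that ResourceReader exposes:

from fontTools.misc.macRes import ResourceReader

reader = ResourceReader("Skia.dfont")
if "sfnt" in reader:
    for res in reader["sfnt"]:
        print(res.id, res.name, len(res.data))
reader.close()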
class Resource(object): class Resource(object):
"""Represents a resource stored within a resource fork. """Represents a resource stored within a resource fork.
Attributes: Attributes:
type: resource type. type: resource type.
data: resource data. data: resource data.
id: ID. id: ID.
name: resource name. name: resource name.
attr: attributes. attr: attributes.
""" """
def __init__(self, resType=None, resData=None, resID=None, resName=None, def __init__(
resAttr=None): self, resType=None, resData=None, resID=None, resName=None, resAttr=None
self.type = resType ):
self.data = resData self.type = resType
self.id = resID self.data = resData
self.name = resName self.id = resID
self.attr = resAttr self.name = resName
self.attr = resAttr
def decompile(self, refData, reader): def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self) sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset])) (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset)) (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength) self.data = reader._read(dataLength)
if self.nameOffset == -1: if self.nameOffset == -1:
return return
absNameOffset = reader.absNameListOffset + self.nameOffset absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset)) (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength)) (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman') self.name = tostr(name, encoding="mac-roman")
ResourceForkHeader = """ ResourceForkHeader = """
@@ -176,7 +176,7 @@ class PlistTarget:
True True
Links: Links:
https://github.com/python/cpython/blob/master/Lib/plistlib.py https://github.com/python/cpython/blob/main/Lib/plistlib.py
http://lxml.de/parsing.html#the-target-parser-interface http://lxml.de/parsing.html#the-target-parser-interface
""" """
@@ -353,7 +353,9 @@ def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
return el return el
def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element: def _dict_element(
d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("dict") el = etree.Element("dict")
items = d.items() items = d.items()
if ctx.sort_keys: if ctx.sort_keys:
@@ -371,7 +373,9 @@ def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etre
return el return el
def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element: def _array_element(
array: Sequence[PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
el = etree.Element("array") el = etree.Element("array")
if len(array) == 0: if len(array) == 0:
return el return el
File diff suppressed because it is too large
@@ -1,20 +1,20 @@
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec from fontTools.misc import eexec
from .psOperators import ( from .psOperators import (
PSOperators, PSOperators,
ps_StandardEncoding, ps_StandardEncoding,
ps_array, ps_array,
ps_boolean, ps_boolean,
ps_dict, ps_dict,
ps_integer, ps_integer,
ps_literal, ps_literal,
ps_mark, ps_mark,
ps_name, ps_name,
ps_operator, ps_operator,
ps_procedure, ps_procedure,
ps_procmark, ps_procmark,
ps_real, ps_real,
ps_string, ps_string,
) )
import re import re
from collections.abc import Callable from collections.abc import Callable
@@ -24,7 +24,7 @@ import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"])) skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"]) endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
@@ -32,7 +32,7 @@ endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*") commentRE = re.compile(b"%[^\n\r]*")
# XXX This not entirely correct as it doesn't allow *nested* embedded parens: # XXX This not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = br""" stringPat = rb"""
\( \(
( (
( (
@@ -51,335 +51,349 @@ stringRE = re.compile(stringPat)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"])) hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception): pass
class PSError(Exception): pass class PSTokenError(Exception):
pass
class PSError(Exception):
pass
class PSTokenizer(object): class PSTokenizer(object):
def __init__(self, buf=b"", encoding="ascii"):
# Force self.buf to be a byte string
buf = tobytes(buf)
self.buf = buf
self.len = len(buf)
self.pos = 0
self.closed = False
self.encoding = encoding
def __init__(self, buf=b'', encoding="ascii"): def read(self, n=-1):
# Force self.buf to be a byte string """Read at most 'n' bytes from the buffer, or less if the read
buf = tobytes(buf) hits EOF before obtaining 'n' bytes.
self.buf = buf If 'n' is negative or omitted, read all data until EOF is reached.
self.len = len(buf) """
self.pos = 0 if self.closed:
self.closed = False raise ValueError("I/O operation on closed file")
self.encoding = encoding if n is None or n < 0:
newpos = self.len
else:
newpos = min(self.pos + n, self.len)
r = self.buf[self.pos : newpos]
self.pos = newpos
return r
def read(self, n=-1): def close(self):
"""Read at most 'n' bytes from the buffer, or less if the read if not self.closed:
hits EOF before obtaining 'n' bytes. self.closed = True
If 'n' is negative or omitted, read all data until EOF is reached. del self.buf, self.pos
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if n is None or n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def close(self): def getnexttoken(
if not self.closed: self,
self.closed = True # localize some stuff, for performance
del self.buf, self.pos len=len,
ps_special=ps_special,
stringmatch=stringRE.match,
hexstringmatch=hexstringRE.match,
commentmatch=commentRE.match,
endmatch=endofthingRE.match,
):
def getnexttoken(self, self.skipwhite()
# localize some stuff, for performance if self.pos >= self.len:
len=len, return None, None
ps_special=ps_special, pos = self.pos
stringmatch=stringRE.match, buf = self.buf
hexstringmatch=hexstringRE.match, char = bytechr(byteord(buf[pos]))
commentmatch=commentRE.match, if char in ps_special:
endmatch=endofthingRE.match): if char in b"{}[]":
tokentype = "do_special"
token = char
elif char == b"%":
tokentype = "do_comment"
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == b"(":
tokentype = "do_string"
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError("bad string at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == b"<":
tokentype = "do_hexstring"
m = hexstringmatch(buf, pos)
if m is None:
raise PSTokenError("bad hexstring at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
else:
raise PSTokenError("bad token at character %d" % pos)
else:
if char == b"/":
tokentype = "do_literal"
m = endmatch(buf, pos + 1)
else:
tokentype = ""
m = endmatch(buf, pos)
if m is None:
raise PSTokenError("bad token at character %d" % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
token = tostr(token, encoding=self.encoding)
return tokentype, token
self.skipwhite() def skipwhite(self, whitematch=skipwhiteRE.match):
if self.pos >= self.len: _, nextpos = whitematch(self.buf, self.pos).span()
return None, None self.pos = nextpos
pos = self.pos
buf = self.buf
char = bytechr(byteord(buf[pos]))
if char in ps_special:
if char in b'{}[]':
tokentype = 'do_special'
token = char
elif char == b'%':
tokentype = 'do_comment'
_, nextpos = commentmatch(buf, pos).span()
token = buf[pos:nextpos]
elif char == b'(':
tokentype = 'do_string'
m = stringmatch(buf, pos)
if m is None:
raise PSTokenError('bad string at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
elif char == b'<':
tokentype = 'do_hexstring'
m = hexstringmatch(buf, pos)
if m is None:
raise PSTokenError('bad hexstring at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
else:
raise PSTokenError('bad token at character %d' % pos)
else:
if char == b'/':
tokentype = 'do_literal'
m = endmatch(buf, pos+1)
else:
tokentype = ''
m = endmatch(buf, pos)
if m is None:
raise PSTokenError('bad token at character %d' % pos)
_, nextpos = m.span()
token = buf[pos:nextpos]
self.pos = pos + len(token)
token = tostr(token, encoding=self.encoding)
return tokentype, token
def skipwhite(self, whitematch=skipwhiteRE.match): def starteexec(self):
_, nextpos = whitematch(self.buf, self.pos).span() self.pos = self.pos + 1
self.pos = nextpos self.dirtybuf = self.buf[self.pos :]
self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
self.len = len(self.buf)
self.pos = 4
def starteexec(self): def stopeexec(self):
self.pos = self.pos + 1 if not hasattr(self, "dirtybuf"):
self.dirtybuf = self.buf[self.pos:] return
self.buf, R = eexec.decrypt(self.dirtybuf, 55665) self.buf = self.dirtybuf
self.len = len(self.buf) del self.dirtybuf
self.pos = 4
def stopeexec(self):
if not hasattr(self, 'dirtybuf'):
return
self.buf = self.dirtybuf
del self.dirtybuf
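A minimal sketch (the PostScript fragment is made up) of driving the PSTokenizer above by hand; getnexttoken() yields (tokentype, token) pairs until it returns (None, None) at end of input:

from fontTools.misc.psLib import PSTokenizer

tok = PSTokenizer(b"/Foo (bar) def % comment")
while True:
    tokentype, token = tok.getnexttoken()
    if not token:
        break
    # e.g. ('do_literal', '/Foo'), ('do_string', '(bar)'), ('', 'def'), ('do_comment', '% comment')
    print(tokentype, token)
tok.close()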
class PSInterpreter(PSOperators): class PSInterpreter(PSOperators):
def __init__(self, encoding="ascii"):
systemdict = {}
userdict = {}
self.encoding = encoding
self.dictstack = [systemdict, userdict]
self.stack = []
self.proclevel = 0
self.procmark = ps_procmark()
self.fillsystemdict()
def __init__(self, encoding="ascii"): def fillsystemdict(self):
systemdict = {} systemdict = self.dictstack[0]
userdict = {} systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
self.encoding = encoding systemdict["]"] = ps_operator("]", self.do_makearray)
self.dictstack = [systemdict, userdict] systemdict["true"] = ps_boolean(1)
self.stack = [] systemdict["false"] = ps_boolean(0)
self.proclevel = 0 systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
self.procmark = ps_procmark() systemdict["FontDirectory"] = ps_dict({})
self.fillsystemdict() self.suckoperators(systemdict, self.__class__)
def fillsystemdict(self): def suckoperators(self, systemdict, klass):
systemdict = self.dictstack[0] for name in dir(klass):
systemdict['['] = systemdict['mark'] = self.mark = ps_mark() attr = getattr(self, name)
systemdict[']'] = ps_operator(']', self.do_makearray) if isinstance(attr, Callable) and name[:3] == "ps_":
systemdict['true'] = ps_boolean(1) name = name[3:]
systemdict['false'] = ps_boolean(0) systemdict[name] = ps_operator(name, attr)
systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) for baseclass in klass.__bases__:
systemdict['FontDirectory'] = ps_dict({}) self.suckoperators(systemdict, baseclass)
self.suckoperators(systemdict, self.__class__)
def suckoperators(self, systemdict, klass): def interpret(self, data, getattr=getattr):
for name in dir(klass): tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
attr = getattr(self, name) getnexttoken = tokenizer.getnexttoken
if isinstance(attr, Callable) and name[:3] == 'ps_': do_token = self.do_token
name = name[3:] handle_object = self.handle_object
systemdict[name] = ps_operator(name, attr) try:
for baseclass in klass.__bases__: while 1:
self.suckoperators(systemdict, baseclass) tokentype, token = getnexttoken()
if not token:
break
if tokentype:
handler = getattr(self, tokentype)
object = handler(token)
else:
object = do_token(token)
if object is not None:
handle_object(object)
tokenizer.close()
self.tokenizer = None
except:
if self.tokenizer is not None:
log.debug(
"ps error:\n"
"- - - - - - -\n"
"%s\n"
">>>\n"
"%s\n"
"- - - - - - -",
self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
)
raise
def interpret(self, data, getattr=getattr): def handle_object(self, object):
tokenizer = self.tokenizer = PSTokenizer(data, self.encoding) if not (self.proclevel or object.literal or object.type == "proceduretype"):
getnexttoken = tokenizer.getnexttoken if object.type != "operatortype":
do_token = self.do_token object = self.resolve_name(object.value)
handle_object = self.handle_object if object.literal:
try: self.push(object)
while 1: else:
tokentype, token = getnexttoken() if object.type == "proceduretype":
if not token: self.call_procedure(object)
break else:
if tokentype: object.function()
handler = getattr(self, tokentype) else:
object = handler(token) self.push(object)
else:
object = do_token(token)
if object is not None:
handle_object(object)
tokenizer.close()
self.tokenizer = None
except:
if self.tokenizer is not None:
log.debug(
'ps error:\n'
'- - - - - - -\n'
'%s\n'
'>>>\n'
'%s\n'
'- - - - - - -',
self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos],
self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
raise
def handle_object(self, object): def call_procedure(self, proc):
if not (self.proclevel or object.literal or object.type == 'proceduretype'): handle_object = self.handle_object
if object.type != 'operatortype': for item in proc.value:
object = self.resolve_name(object.value) handle_object(item)
if object.literal:
self.push(object)
else:
if object.type == 'proceduretype':
self.call_procedure(object)
else:
object.function()
else:
self.push(object)
def call_procedure(self, proc): def resolve_name(self, name):
handle_object = self.handle_object dictstack = self.dictstack
for item in proc.value: for i in range(len(dictstack) - 1, -1, -1):
handle_object(item) if name in dictstack[i]:
return dictstack[i][name]
raise PSError("name error: " + str(name))
def resolve_name(self, name): def do_token(
dictstack = self.dictstack self,
for i in range(len(dictstack)-1, -1, -1): token,
if name in dictstack[i]: int=int,
return dictstack[i][name] float=float,
raise PSError('name error: ' + str(name)) ps_name=ps_name,
ps_integer=ps_integer,
ps_real=ps_real,
):
try:
num = int(token)
except (ValueError, OverflowError):
try:
num = float(token)
except (ValueError, OverflowError):
if "#" in token:
hashpos = token.find("#")
try:
base = int(token[:hashpos])
num = int(token[hashpos + 1 :], base)
except (ValueError, OverflowError):
return ps_name(token)
else:
return ps_integer(num)
else:
return ps_name(token)
else:
return ps_real(num)
else:
return ps_integer(num)
def do_token(self, token, def do_comment(self, token):
int=int, pass
float=float,
ps_name=ps_name,
ps_integer=ps_integer,
ps_real=ps_real):
try:
num = int(token)
except (ValueError, OverflowError):
try:
num = float(token)
except (ValueError, OverflowError):
if '#' in token:
hashpos = token.find('#')
try:
base = int(token[:hashpos])
num = int(token[hashpos+1:], base)
except (ValueError, OverflowError):
return ps_name(token)
else:
return ps_integer(num)
else:
return ps_name(token)
else:
return ps_real(num)
else:
return ps_integer(num)
def do_comment(self, token): def do_literal(self, token):
pass return ps_literal(token[1:])
def do_literal(self, token): def do_string(self, token):
return ps_literal(token[1:]) return ps_string(token[1:-1])
def do_string(self, token): def do_hexstring(self, token):
return ps_string(token[1:-1]) hexStr = "".join(token[1:-1].split())
if len(hexStr) % 2:
hexStr = hexStr + "0"
cleanstr = []
for i in range(0, len(hexStr), 2):
cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
cleanstr = "".join(cleanstr)
return ps_string(cleanstr)
def do_hexstring(self, token): def do_special(self, token):
hexStr = "".join(token[1:-1].split()) if token == "{":
if len(hexStr) % 2: self.proclevel = self.proclevel + 1
hexStr = hexStr + '0' return self.procmark
cleanstr = [] elif token == "}":
for i in range(0, len(hexStr), 2): proc = []
cleanstr.append(chr(int(hexStr[i:i+2], 16))) while 1:
cleanstr = "".join(cleanstr) topobject = self.pop()
return ps_string(cleanstr) if topobject == self.procmark:
break
proc.append(topobject)
self.proclevel = self.proclevel - 1
proc.reverse()
return ps_procedure(proc)
elif token == "[":
return self.mark
elif token == "]":
return ps_name("]")
else:
raise PSTokenError("huh?")
def do_special(self, token): def push(self, object):
if token == '{': self.stack.append(object)
self.proclevel = self.proclevel + 1
return self.procmark
elif token == '}':
proc = []
while 1:
topobject = self.pop()
if topobject == self.procmark:
break
proc.append(topobject)
self.proclevel = self.proclevel - 1
proc.reverse()
return ps_procedure(proc)
elif token == '[':
return self.mark
elif token == ']':
return ps_name(']')
else:
raise PSTokenError('huh?')
def push(self, object): def pop(self, *types):
self.stack.append(object) stack = self.stack
if not stack:
raise PSError("stack underflow")
object = stack[-1]
if types:
if object.type not in types:
raise PSError(
"typecheck, expected %s, found %s" % (repr(types), object.type)
)
del stack[-1]
return object
def pop(self, *types): def do_makearray(self):
stack = self.stack array = []
if not stack: while 1:
raise PSError('stack underflow') topobject = self.pop()
object = stack[-1] if topobject == self.mark:
if types: break
if object.type not in types: array.append(topobject)
raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) array.reverse()
del stack[-1] self.push(ps_array(array))
return object
def do_makearray(self): def close(self):
array = [] """Remove circular references."""
while 1: del self.stack
topobject = self.pop() del self.dictstack
if topobject == self.mark:
break
array.append(topobject)
array.reverse()
self.push(ps_array(array))
def close(self):
"""Remove circular references."""
del self.stack
del self.dictstack
def unpack_item(item): def unpack_item(item):
tp = type(item.value) tp = type(item.value)
if tp == dict: if tp == dict:
newitem = {} newitem = {}
for key, value in item.value.items(): for key, value in item.value.items():
newitem[key] = unpack_item(value) newitem[key] = unpack_item(value)
elif tp == list: elif tp == list:
newitem = [None] * len(item.value) newitem = [None] * len(item.value)
for i in range(len(item.value)): for i in range(len(item.value)):
newitem[i] = unpack_item(item.value[i]) newitem[i] = unpack_item(item.value[i])
if item.type == 'proceduretype': if item.type == "proceduretype":
newitem = tuple(newitem) newitem = tuple(newitem)
else: else:
newitem = item.value newitem = item.value
return newitem return newitem
def suckfont(data, encoding="ascii"): def suckfont(data, encoding="ascii"):
m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
if m: if m:
fontName = m.group(1) fontName = m.group(1)
fontName = fontName.decode() fontName = fontName.decode()
else: else:
fontName = None fontName = None
interpreter = PSInterpreter(encoding=encoding) interpreter = PSInterpreter(encoding=encoding)
interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") interpreter.interpret(
interpreter.interpret(data) b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
fontdir = interpreter.dictstack[0]['FontDirectory'].value )
if fontName in fontdir: interpreter.interpret(data)
rawfont = fontdir[fontName] fontdir = interpreter.dictstack[0]["FontDirectory"].value
else: if fontName in fontdir:
# fall back, in case fontName wasn't found rawfont = fontdir[fontName]
fontNames = list(fontdir.keys()) else:
if len(fontNames) > 1: # fall back, in case fontName wasn't found
fontNames.remove("Helvetica") fontNames = list(fontdir.keys())
fontNames.sort() if len(fontNames) > 1:
rawfont = fontdir[fontNames[0]] fontNames.remove("Helvetica")
interpreter.close() fontNames.sort()
return unpack_item(rawfont) rawfont = fontdir[fontNames[0]]
interpreter.close()
return unpack_item(rawfont)
File diff suppressed because it is too large
@@ -9,41 +9,45 @@ import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
__all__ = [ __all__ = [
"noRound", "noRound",
"otRound", "otRound",
"maybeRound", "maybeRound",
"roundFunc", "roundFunc",
] ]
def noRound(value): def noRound(value):
return value return value
def otRound(value): def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``. """Round float value to nearest integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_) The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy: fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer; for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate. for other fractional values, truncate.
This function rounds the floating-point value according to this strategy This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point. in preparation for conversion to fixed-point.
Args: Args:
value (float): The input floating-point value. value (float): The input floating-point value.
Returns
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
Returns
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
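A few worked values (a sketch, not part of the module) illustrating the "round half towards +Infinity" strategy the docstring above describes:

from fontTools.misc.roundTools import otRound

assert otRound(0.5) == 1    # halves go up
assert otRound(1.5) == 2
assert otRound(-0.5) == 0   # towards +Infinity, not away from zero
assert otRound(-1.5) == -1
assert otRound(2.4) == 2    # other fractions truncate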
def maybeRound(v, tolerance, round=otRound): def maybeRound(v, tolerance, round=otRound):
rounded = round(v) rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound): def roundFunc(tolerance, round=otRound):
if tolerance < 0: if tolerance < 0:
@@ -52,7 +56,7 @@ def roundFunc(tolerance, round=otRound):
if tolerance == 0: if tolerance == 0:
return noRound return noRound
if tolerance >= .5: if tolerance >= 0.5:
return round return round
return functools.partial(maybeRound, tolerance=tolerance, round=round) return functools.partial(maybeRound, tolerance=tolerance, round=round)
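A sketch (tolerances chosen arbitrarily) of how roundFunc picks a rounding strategy from the tolerance, per the branches shown above:

from fontTools.misc.roundTools import roundFunc

exact = roundFunc(0)      # tolerance 0: noRound, values pass through untouched
always = roundFunc(0.5)   # tolerance >= .5: plain otRound
fuzzy = roundFunc(0.1)    # otherwise: round only when within the tolerance of an integer

assert exact(3.3) == 3.3
assert always(3.3) == 3
assert fuzzy(3.05) == 3   # |3 - 3.05| <= 0.1, so it rounds
assert fuzzy(3.3) == 3.3  # too far from an integer, left alone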
@@ -85,7 +89,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
return "0.0" return "0.0"
value = otRound(value / factor) * factor value = otRound(value / factor) * factor
eps = .5 * factor eps = 0.5 * factor
lo = value - eps lo = value - eps
hi = value + eps hi = value + eps
# If the range of valid choices spans an integer, return the integer. # If the range of valid choices spans an integer, return the integer.
@@ -99,7 +103,7 @@ def nearestMultipleShortestRepr(value: float, factor: float) -> str:
for i in range(len(lo)): for i in range(len(lo)):
if lo[i] != hi[i]: if lo[i] != hi[i]:
break break
period = lo.find('.') period = lo.find(".")
assert period < i assert period < i
fmt = "%%.%df" % (i - period) fmt = "%%.%df" % (i - period)
return fmt % value return fmt % value
@@ -56,68 +56,72 @@ __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception): class Error(Exception):
pass pass
def pack(fmt, obj): def pack(fmt, obj):
formatstring, names, fixes = getformat(fmt, keep_pad_byte=True) formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
elements = [] elements = []
if not isinstance(obj, dict): if not isinstance(obj, dict):
obj = obj.__dict__ obj = obj.__dict__
for name in names: for name in names:
value = obj[name] value = obj[name]
if name in fixes: if name in fixes:
# fixed point conversion # fixed point conversion
value = fl2fi(value, fixes[name]) value = fl2fi(value, fixes[name])
elif isinstance(value, str): elif isinstance(value, str):
value = tobytes(value) value = tobytes(value)
elements.append(value) elements.append(value)
data = struct.pack(*(formatstring,) + tuple(elements)) data = struct.pack(*(formatstring,) + tuple(elements))
return data return data
def unpack(fmt, data, obj=None): def unpack(fmt, data, obj=None):
if obj is None: if obj is None:
obj = {} obj = {}
data = tobytes(data) data = tobytes(data)
formatstring, names, fixes = getformat(fmt) formatstring, names, fixes = getformat(fmt)
if isinstance(obj, dict): if isinstance(obj, dict):
d = obj d = obj
else: else:
d = obj.__dict__ d = obj.__dict__
elements = struct.unpack(formatstring, data) elements = struct.unpack(formatstring, data)
for i in range(len(names)): for i in range(len(names)):
name = names[i] name = names[i]
value = elements[i] value = elements[i]
if name in fixes: if name in fixes:
# fixed point conversion # fixed point conversion
value = fi2fl(value, fixes[name]) value = fi2fl(value, fixes[name])
elif isinstance(value, bytes): elif isinstance(value, bytes):
try: try:
value = tostr(value) value = tostr(value)
except UnicodeDecodeError: except UnicodeDecodeError:
pass pass
d[name] = value d[name] = value
return obj return obj
def unpack2(fmt, data, obj=None): def unpack2(fmt, data, obj=None):
length = calcsize(fmt) length = calcsize(fmt)
return unpack(fmt, data[:length], obj), data[length:] return unpack(fmt, data[:length], obj), data[length:]
def calcsize(fmt): def calcsize(fmt):
formatstring, names, fixes = getformat(fmt) formatstring, names, fixes = getformat(fmt)
return struct.calcsize(formatstring) return struct.calcsize(formatstring)
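An illustrative pack/unpack round trip (the format and field names are made up) using the "name: formatchar" mini-language that getformat() below parses:

from fontTools.misc import sstruct

headerFormat = """
    > # big endian
    version:  H
    flags:    H
    scale:    16.16F  # fixed point, unpacked as a float
"""

data = sstruct.pack(headerFormat, {"version": 1, "flags": 0, "scale": 1.5})
assert len(data) == sstruct.calcsize(headerFormat)
print(sstruct.unpack(headerFormat, data))  # {'version': 1, 'flags': 0, 'scale': 1.5}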
# matches "name:formatchar" (whitespace is allowed) # matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile( _elementRE = re.compile(
r"\s*" # whitespace r"\s*" # whitespace
r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
r"\s*:\s*" # whitespace : whitespace r"\s*:\s*" # whitespace : whitespace
r"([xcbB?hHiIlLqQfd]|" # formatchar... r"([xcbB?hHiIlLqQfd]|" # formatchar...
r"[0-9]+[ps]|" # ...formatchar... r"[0-9]+[ps]|" # ...formatchar...
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
r"\s*" # whitespace r"\s*" # whitespace
r"(#.*)?$" # [comment] + end of string r"(#.*)?$" # [comment] + end of string
) )
# matches the special struct fmt chars and 'x' (pad byte) # matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$") _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
@@ -125,54 +129,53 @@ _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment # matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$") _emptyRE = re.compile(r"\s*(#.*)?$")
_fixedpointmappings = { _fixedpointmappings = {8: "b", 16: "h", 32: "l"}
8: "b",
16: "h",
32: "l"}
_formatcache = {} _formatcache = {}
def getformat(fmt, keep_pad_byte=False): def getformat(fmt, keep_pad_byte=False):
fmt = tostr(fmt, encoding="ascii") fmt = tostr(fmt, encoding="ascii")
try: try:
formatstring, names, fixes = _formatcache[fmt] formatstring, names, fixes = _formatcache[fmt]
except KeyError: except KeyError:
lines = re.split("[\n;]", fmt) lines = re.split("[\n;]", fmt)
formatstring = "" formatstring = ""
names = [] names = []
fixes = {} fixes = {}
for line in lines: for line in lines:
if _emptyRE.match(line): if _emptyRE.match(line):
continue continue
m = _extraRE.match(line) m = _extraRE.match(line)
if m: if m:
formatchar = m.group(1) formatchar = m.group(1)
if formatchar != 'x' and formatstring: if formatchar != "x" and formatstring:
raise Error("a special fmt char must be first") raise Error("a special fmt char must be first")
else: else:
m = _elementRE.match(line) m = _elementRE.match(line)
if not m: if not m:
raise Error("syntax error in fmt: '%s'" % line) raise Error("syntax error in fmt: '%s'" % line)
name = m.group(1) name = m.group(1)
formatchar = m.group(2) formatchar = m.group(2)
if keep_pad_byte or formatchar != "x": if keep_pad_byte or formatchar != "x":
names.append(name) names.append(name)
if m.group(3): if m.group(3):
# fixed point # fixed point
before = int(m.group(3)) before = int(m.group(3))
after = int(m.group(4)) after = int(m.group(4))
bits = before + after bits = before + after
if bits not in [8, 16, 32]: if bits not in [8, 16, 32]:
raise Error("fixed point must be 8, 16 or 32 bits long") raise Error("fixed point must be 8, 16 or 32 bits long")
formatchar = _fixedpointmappings[bits] formatchar = _fixedpointmappings[bits]
assert m.group(5) == "F" assert m.group(5) == "F"
fixes[name] = after fixes[name] = after
formatstring = formatstring + formatchar formatstring = formatstring + formatchar
_formatcache[fmt] = formatstring, names, fixes _formatcache[fmt] = formatstring, names, fixes
return formatstring, names, fixes return formatstring, names, fixes
def _test(): def _test():
fmt = """ fmt = """
# comments are allowed # comments are allowed
> # big endian (see documentation for struct) > # big endian (see documentation for struct)
# empty lines are allowed: # empty lines are allowed:
@@ -188,29 +191,30 @@ def _test():
apad: x apad: x
""" """
print('size:', calcsize(fmt)) print("size:", calcsize(fmt))
class foo(object): class foo(object):
pass pass
i = foo() i = foo()
i.ashort = 0x7fff i.ashort = 0x7FFF
i.along = 0x7fffffff i.along = 0x7FFFFFFF
i.abyte = 0x7f i.abyte = 0x7F
i.achar = "a" i.achar = "a"
i.astr = "12345" i.astr = "12345"
i.afloat = 0.5 i.afloat = 0.5
i.adouble = 0.5 i.adouble = 0.5
i.afixed = 1.5 i.afixed = 1.5
i.abool = True i.abool = True
data = pack(fmt, i)
print("data:", repr(data))
print(unpack(fmt, data))
i2 = foo()
unpack(fmt, data, i2)
print(vars(i2))
data = pack(fmt, i)
print('data:', repr(data))
print(unpack(fmt, data))
i2 = foo()
unpack(fmt, data, i2)
print(vars(i2))
if __name__ == "__main__": if __name__ == "__main__":
_test() _test()
@@ -4,98 +4,104 @@ from itertools import count
import sympy as sp import sympy as sp
import sys import sys
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
t, x, y = sp.symbols('t x y', real=True) t, x, y = sp.symbols("t x y", real=True)
c = sp.symbols('c', real=False) # Complex representation instead of x/y c = sp.symbols("c", real=False) # Complex representation instead of x/y
X = tuple(sp.symbols('x:%d'%(n+1), real=True)) X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
Y = tuple(sp.symbols('y:%d'%(n+1), real=True)) Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01'))) P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
C = tuple(sp.symbols('c:%d'%(n+1), real=False)) C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
# Cubic Bernstein basis functions # Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)] BinomialCoefficient = [(1, 0)]
for i in range(1, n+1): for i in range(1, n + 1):
last = BinomialCoefficient[-1] last = BinomialCoefficient[-1]
this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,) this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
BinomialCoefficient.append(this) BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this del last, this
BernsteinPolynomial = tuple( BernsteinPolynomial = tuple(
tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs)) tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
for n,coeffs in enumerate(BinomialCoefficient)) for n, coeffs in enumerate(BinomialCoefficient)
)
BezierCurve = tuple( BezierCurve = tuple(
tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins)) tuple(
for j in range(2)) sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
for n,bernsteins in enumerate(BernsteinPolynomial)) for j in range(2)
)
for n, bernsteins in enumerate(BernsteinPolynomial)
)
BezierCurveC = tuple( BezierCurveC = tuple(
sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins)) sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
for n,bernsteins in enumerate(BernsteinPolynomial)) for n, bernsteins in enumerate(BernsteinPolynomial)
)
def green(f, curveXY): def green(f, curveXY):
f = -sp.integrate(sp.sympify(f), y) f = -sp.integrate(sp.sympify(f), y)
f = f.subs({x:curveXY[0], y:curveXY[1]}) f = f.subs({x: curveXY[0], y: curveXY[1]})
f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
return f return f
class _BezierFuncsLazy(dict): class _BezierFuncsLazy(dict):
def __init__(self, symfunc):
self._symfunc = symfunc
self._bezfuncs = {}
def __init__(self, symfunc): def __missing__(self, i):
self._symfunc = symfunc args = ["p%d" % d for d in range(i + 1)]
self._bezfuncs = {} f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
return sp.lambdify(args, f)
def __missing__(self, i):
args = ['p%d'%d for d in range(i+1)]
f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize
return sp.lambdify(args, f)
class GreenPen(BasePen): class GreenPen(BasePen):
_BezierFuncs = {} _BezierFuncs = {}
@classmethod @classmethod
def _getGreenBezierFuncs(celf, func): def _getGreenBezierFuncs(celf, func):
funcstr = str(func) funcstr = str(func)
if not funcstr in celf._BezierFuncs: if not funcstr in celf._BezierFuncs:
celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
return celf._BezierFuncs[funcstr] return celf._BezierFuncs[funcstr]
def __init__(self, func, glyphset=None): def __init__(self, func, glyphset=None):
BasePen.__init__(self, glyphset) BasePen.__init__(self, glyphset)
self._funcs = self._getGreenBezierFuncs(func) self._funcs = self._getGreenBezierFuncs(func)
self.value = 0 self.value = 0
def _moveTo(self, p0): def _moveTo(self, p0):
self.__startPoint = p0 self.__startPoint = p0
def _closePath(self): def _closePath(self):
p0 = self._getCurrentPoint() p0 = self._getCurrentPoint()
if p0 != self.__startPoint: if p0 != self.__startPoint:
self._lineTo(self.__startPoint) self._lineTo(self.__startPoint)
def _endPath(self): def _endPath(self):
p0 = self._getCurrentPoint() p0 = self._getCurrentPoint()
if p0 != self.__startPoint: if p0 != self.__startPoint:
# Green theorem is not defined on open contours. # Green theorem is not defined on open contours.
raise NotImplementedError raise NotImplementedError
def _lineTo(self, p1): def _lineTo(self, p1):
p0 = self._getCurrentPoint() p0 = self._getCurrentPoint()
self.value += self._funcs[1](p0, p1) self.value += self._funcs[1](p0, p1)
def _qCurveToOne(self, p1, p2): def _qCurveToOne(self, p1, p2):
p0 = self._getCurrentPoint() p0 = self._getCurrentPoint()
self.value += self._funcs[2](p0, p1, p2) self.value += self._funcs[2](p0, p1, p2)
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens. # Sample pens.
# Do not use this in real code. # Do not use this in real code.
@@ -103,29 +109,26 @@ class GreenPen(BasePen):
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x * x)
MomentYYPen = partial(GreenPen, func=y * y)
MomentXYPen = partial(GreenPen, func=x * y)


def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
    if docstring is not None:
        print('"""%s"""' % docstring)
print( print(
'''from fontTools.pens.basePen import BasePen, OpenContourError """from fontTools.pens.basePen import BasePen, OpenContourError
try: try:
import cython import cython
except ImportError:
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types # if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython from fontTools.misc import cython
if cython.compiled:
# Yep, I'm compiled.
COMPILED = True
else:
# Just a lowly interpreted script.
COMPILED = False COMPILED = False
@ -135,10 +138,14 @@ class %s(BasePen):
def __init__(self, glyphset=None): def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset) BasePen.__init__(self, glyphset)
'''% (penName, penName), file=file) """
for name,f in funcs: % (penName, penName),
print(' self.%s = 0' % name, file=file) file=file,
print(''' )
for name, f in funcs:
print(" self.%s = 0" % name, file=file)
print(
"""
def _moveTo(self, p0): def _moveTo(self, p0):
self.__startPoint = p0 self.__startPoint = p0
@ -154,32 +161,40 @@ class %s(BasePen):
raise OpenContourError( raise OpenContourError(
"Green theorem is not defined on open contours." "Green theorem is not defined on open contours."
) )
''', end='', file=file) """,
end="",
file=file,
)
for n in (1, 2, 3): for n in (1, 2, 3):
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
greens = [green(f, BezierCurve[n]) for name, f in funcs]
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
defs, exprs = sp.cse(
greens,
optimizations="basic",
symbols=(sp.Symbol("r%d" % i) for i in count()),
)
subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)} print()
greens = [green(f, BezierCurve[n]) for name,f in funcs] for name, value in defs:
greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize print(" @cython.locals(%s=cython.double)" % name, file=file)
greens = [f.subs(subs) for f in greens] # Convert to p to x/y if n == 1:
defs, exprs = sp.cse(greens, print(
optimizations='basic', """\
symbols=(sp.Symbol('r%d'%i) for i in count()))
print()
for name,value in defs:
print(' @cython.locals(%s=cython.double)' % name, file=file)
if n == 1:
print('''\
@cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1): def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint() x0,y0 = self._getCurrentPoint()
x1,y1 = p1 x1,y1 = p1
''', file=file) """,
elif n == 2: file=file,
print('''\ )
elif n == 2:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double) @cython.locals(x2=cython.double, y2=cython.double)
@ -187,9 +202,12 @@ class %s(BasePen):
x0,y0 = self._getCurrentPoint() x0,y0 = self._getCurrentPoint()
x1,y1 = p1 x1,y1 = p1
x2,y2 = p2 x2,y2 = p2
''', file=file) """,
elif n == 3: file=file,
print('''\ )
elif n == 3:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double) @cython.locals(x2=cython.double, y2=cython.double)
@ -199,29 +217,35 @@ class %s(BasePen):
x1,y1 = p1 x1,y1 = p1
x2,y2 = p2 x2,y2 = p2
x3,y3 = p3 x3,y3 = p3
''', file=file) """,
for name,value in defs: file=file,
print(' %s = %s' % (name, value), file=file) )
for name, value in defs:
print(" %s = %s" % (name, value), file=file)
print(file=file) print(file=file)
for name,value in zip([f[0] for f in funcs], exprs): for name, value in zip([f[0] for f in funcs], exprs):
print(' self.%s += %s' % (name, value), file=file) print(" self.%s += %s" % (name, value), file=file)
print(''' print(
"""
if __name__ == '__main__': if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('%s', ['''%penName, file=file) printGreenPen('%s', ["""
for name,f in funcs: % penName,
print(" ('%s', %s)," % (name, str(f)), file=file) file=file,
print(' ])', file=file) )
for name, f in funcs:
print(" ('%s', %s)," % (name, str(f)), file=file)
print(" ])", file=file)
if __name__ == "__main__":
    pen = AreaPen()
    pen.moveTo((100, 100))
    pen.lineTo((100, 200))
    pen.lineTo((200, 200))
    pen.curveTo((200, 250), (300, 300), (250, 350))
    pen.lineTo((200, 100))
    pen.closePath()
    print(pen.value)
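printGreenPen() is how the specialized pens shipped with fontTools (for example fontTools.pens.momentsPen) are generated from symbolic functionals. A hedged sketch of driving it by hand; the pen name and the functional list are illustrative, not taken from this diff:

from fontTools.misc.symfont import x, y, printGreenPen

# Writes a generated BasePen subclass to stdout; pass `file=` to create a
# module. The (name, expression) pairs become attributes accumulated per segment.
printGreenPen(
    "MyMomentsPen",          # hypothetical class name
    [
        ("area", 1),         # signed area
        ("momentX", x),      # first moment about the y axis
    ],
)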
@ -29,12 +29,14 @@ def parseXML(xmlSnippet):
    if isinstance(xmlSnippet, bytes):
        xml += xmlSnippet
    elif isinstance(xmlSnippet, str):
        xml += tobytes(xmlSnippet, "utf-8")
    elif isinstance(xmlSnippet, Iterable):
        xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
    else:
        raise TypeError(
            "expected string or sequence of strings; found %r"
            % type(xmlSnippet).__name__
        )
    xml += b"</root>"
    reader.parser.Parse(xml, 0)
    return reader.root[2]
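For readers unfamiliar with this helper: parseXML() wraps the snippet in a dummy root element, runs it through expat, and returns the parsed content list that the table fromXML()/toXML() round-trip tests consume. A small hedged sketch (the element name and attributes are made up):

from fontTools.misc.testTools import parseXML

content = parseXML('<glyph name="A" height="700"/>')
# roughly: [("glyph", {"name": "A", "height": "700"}, [])]
print(content)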
@ -76,6 +78,7 @@ class FakeFont:
return self.glyphOrder_[glyphID] return self.glyphOrder_[glyphID]
else: else:
return "glyph%.5d" % glyphID return "glyph%.5d" % glyphID
def getGlyphNameMany(self, lst): def getGlyphNameMany(self, lst):
return [self.getGlyphName(gid) for gid in lst] return [self.getGlyphName(gid) for gid in lst]
@ -92,6 +95,7 @@ class FakeFont:
class TestXMLReader_(object): class TestXMLReader_(object):
def __init__(self): def __init__(self):
from xml.parsers.expat import ParserCreate from xml.parsers.expat import ParserCreate
self.parser = ParserCreate() self.parser = ParserCreate()
self.parser.StartElementHandler = self.startElement_ self.parser.StartElementHandler = self.startElement_
self.parser.EndElementHandler = self.endElement_ self.parser.EndElementHandler = self.endElement_
@ -114,7 +118,7 @@ class TestXMLReader_(object):
self.stack[-1][2].append(data) self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr='\n'): def makeXMLWriter(newlinestr="\n"):
# don't write OS-specific new lines # don't write OS-specific new lines
writer = XMLWriter(BytesIO(), newlinestr=newlinestr) writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
# erase XML declaration # erase XML declaration
@ -166,7 +170,7 @@ class MockFont(object):
to its glyphOrder.""" to its glyphOrder."""
def __init__(self): def __init__(self):
self._glyphOrder = ['.notdef'] self._glyphOrder = [".notdef"]
class AllocatingDict(dict): class AllocatingDict(dict):
def __missing__(reverseDict, key): def __missing__(reverseDict, key):
@ -174,7 +178,8 @@ class MockFont(object):
gid = len(reverseDict) gid = len(reverseDict)
reverseDict[key] = gid reverseDict[key] = gid
return gid return gid
self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
self.lazy = False self.lazy = False
def getGlyphID(self, glyph): def getGlyphID(self, glyph):
@ -192,7 +197,6 @@ class MockFont(object):
class TestCase(_TestCase): class TestCase(_TestCase):
def __init__(self, methodName): def __init__(self, methodName):
_TestCase.__init__(self, methodName) _TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex, # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
@ -202,7 +206,6 @@ class TestCase(_TestCase):
class DataFilesHandler(TestCase): class DataFilesHandler(TestCase):
def setUp(self): def setUp(self):
self.tempdir = None self.tempdir = None
self.num_tempfiles = 0 self.num_tempfiles = 0
@ -33,90 +33,90 @@ class Tag(str):
def readHex(content): def readHex(content):
"""Convert a list of hex strings to binary data.""" """Convert a list of hex strings to binary data."""
return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str))) return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))
def deHexStr(hexdata): def deHexStr(hexdata):
"""Convert a hex string to binary data.""" """Convert a hex string to binary data."""
hexdata = strjoin(hexdata.split()) hexdata = strjoin(hexdata.split())
if len(hexdata) % 2: if len(hexdata) % 2:
hexdata = hexdata + "0" hexdata = hexdata + "0"
data = [] data = []
for i in range(0, len(hexdata), 2): for i in range(0, len(hexdata), 2):
data.append(bytechr(int(hexdata[i:i+2], 16))) data.append(bytechr(int(hexdata[i : i + 2], 16)))
return bytesjoin(data) return bytesjoin(data)
def hexStr(data): def hexStr(data):
"""Convert binary data to a hex string.""" """Convert binary data to a hex string."""
h = string.hexdigits h = string.hexdigits
r = '' r = ""
for c in data: for c in data:
i = byteord(c) i = byteord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF] r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r return r
def num2binary(l, bits=32): def num2binary(l, bits=32):
items = [] items = []
binary = "" binary = ""
for i in range(bits): for i in range(bits):
if l & 0x1: if l & 0x1:
binary = "1" + binary binary = "1" + binary
else: else:
binary = "0" + binary binary = "0" + binary
l = l >> 1 l = l >> 1
if not ((i+1) % 8): if not ((i + 1) % 8):
items.append(binary) items.append(binary)
binary = "" binary = ""
if binary: if binary:
items.append(binary) items.append(binary)
items.reverse() items.reverse()
assert l in (0, -1), "number doesn't fit in number of bits" assert l in (0, -1), "number doesn't fit in number of bits"
return ' '.join(items) return " ".join(items)
def binary2num(bin): def binary2num(bin):
bin = strjoin(bin.split()) bin = strjoin(bin.split())
l = 0 l = 0
for digit in bin: for digit in bin:
l = l << 1 l = l << 1
if digit != "0": if digit != "0":
l = l | 0x1 l = l | 0x1
return l return l
def caselessSort(alist): def caselessSort(alist):
"""Return a sorted copy of a list. If there are only strings """Return a sorted copy of a list. If there are only strings
in the list, it will not consider case. in the list, it will not consider case.
""" """
try: try:
return sorted(alist, key=lambda a: (a.lower(), a)) return sorted(alist, key=lambda a: (a.lower(), a))
except TypeError: except TypeError:
return sorted(alist) return sorted(alist)
def pad(data, size):
    r"""Pad byte string 'data' with null bytes until its length is a
    multiple of 'size'.

    >>> len(pad(b'abcd', 4))
    4
    >>> len(pad(b'abcde', 2))
    6
    >>> len(pad(b'abcde', 4))
    8
    >>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
    True
    """
    data = tobytes(data)
    if size > 1:
        remainder = len(data) % size
        if remainder:
            data += b"\0" * (size - remainder)
    return data
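The helpers in this module back most byte-level fixtures in the test suite. A quick hedged sketch of the round trips they provide:

from fontTools.misc.textTools import deHexStr, hexStr, num2binary, binary2num

data = deHexStr("DEAD BEEF")       # whitespace is ignored
print(hexStr(data))                # 'deadbeef'
print(num2binary(5, bits=8))       # '00000101'
print(binary2num("0000 0101"))     # 5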
def tostr(s, encoding="ascii", errors="strict"): def tostr(s, encoding="ascii", errors="strict"):
@ -150,5 +150,6 @@ def bytesjoin(iterable, joiner=b""):
if __name__ == "__main__":
    import doctest, sys

    sys.exit(doctest.testmod().failed)
@ -10,59 +10,79 @@ import calendar
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [
    None,
    "Jan",
    "Feb",
    "Mar",
    "Apr",
    "May",
    "Jun",
    "Jul",
    "Aug",
    "Sep",
    "Oct",
    "Nov",
    "Dec",
]
def asctime(t=None): def asctime(t=None):
""" """
Convert a tuple or struct_time representing a time as returned by gmtime() Convert a tuple or struct_time representing a time as returned by gmtime()
or localtime() to a 24-character string of the following form: or localtime() to a 24-character string of the following form:
>>> asctime(time.gmtime(0)) >>> asctime(time.gmtime(0))
'Thu Jan 1 00:00:00 1970' 'Thu Jan 1 00:00:00 1970'
If t is not provided, the current time as returned by localtime() is used. If t is not provided, the current time as returned by localtime() is used.
Locale information is not used by asctime(). Locale information is not used by asctime().
This is meant to normalise the output of the built-in time.asctime() across This is meant to normalise the output of the built-in time.asctime() across
different platforms and Python versions. different platforms and Python versions.
In Python 3.x, the day of the month is right-justified, whereas on Windows In Python 3.x, the day of the month is right-justified, whereas on Windows
Python 2.7 it is padded with zeros. Python 2.7 it is padded with zeros.
See https://github.com/fonttools/fonttools/issues/455 See https://github.com/fonttools/fonttools/issues/455
""" """
if t is None: if t is None:
t = time.localtime() t = time.localtime()
    s = "%s %s %2s %s" % (
        DAYNAMES[t.tm_wday],
        MONTHNAMES[t.tm_mon],
        t.tm_mday,
        time.strftime("%H:%M:%S %Y", t),
    )
    return s
def timestampToString(value): def timestampToString(value):
return asctime(time.gmtime(max(0, value + epoch_diff))) return asctime(time.gmtime(max(0, value + epoch_diff)))
def timestampFromString(value): def timestampFromString(value):
wkday, mnth = value[:7].split() wkday, mnth = value[:7].split()
t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y') t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc) t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
wkday_idx = DAYNAMES.index(wkday) wkday_idx = DAYNAMES.index(wkday)
assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday' assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
return int(t.timestamp()) - epoch_diff return int(t.timestamp()) - epoch_diff
def timestampNow(): def timestampNow():
# https://reproducible-builds.org/specs/source-date-epoch/ # https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH") source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
if source_date_epoch is not None: if source_date_epoch is not None:
return int(source_date_epoch) - epoch_diff return int(source_date_epoch) - epoch_diff
return int(time.time() - epoch_diff) return int(time.time() - epoch_diff)
def timestampSinceEpoch(value):
    return int(value - epoch_diff)
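All of these helpers pivot on epoch_diff, the (negative) number of seconds between the Unix epoch (1970) and the Macintosh/OpenType epoch (1904) in which 'head' table dates are expressed. A hedged sketch of the conversion, with a made-up timestamp:

import calendar
import time

epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))  # negative

mac_timestamp = 3_700_000_000                 # made-up seconds since 1904-01-01
unix_timestamp = mac_timestamp + epoch_diff   # back to a Unix timestamp
print(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(unix_timestamp)))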
if __name__ == "__main__":
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
@ -19,6 +19,9 @@ Offset
Scale Scale
Convenience function that returns a scaling transformation Convenience function that returns a scaling transformation
The DecomposedTransform class implements a transformation with separate
translate, rotation, scale, skew, and transformation-center components.
:Example: :Example:
>>> t = Transform(2, 0, 0, 3, 0, 0) >>> t = Transform(2, 0, 0, 3, 0, 0)
@ -49,10 +52,12 @@ Scale
>>> >>>
""" """
import math
from typing import NamedTuple from typing import NamedTuple
from dataclasses import dataclass
__all__ = ["Transform", "Identity", "Offset", "Scale"] __all__ = ["Transform", "Identity", "Offset", "Scale", "DecomposedTransform"]
_EPSILON = 1e-15 _EPSILON = 1e-15
@ -61,338 +66,430 @@ _MINUS_ONE_EPSILON = -1 + _EPSILON
def _normSinCos(v): def _normSinCos(v):
if abs(v) < _EPSILON: if abs(v) < _EPSILON:
v = 0 v = 0
elif v > _ONE_EPSILON: elif v > _ONE_EPSILON:
v = 1 v = 1
elif v < _MINUS_ONE_EPSILON: elif v < _MINUS_ONE_EPSILON:
v = -1 v = -1
return v return v
class Transform(NamedTuple): class Transform(NamedTuple):
"""2x2 transformation matrix plus offset, a.k.a. Affine transform. """2x2 transformation matrix plus offset, a.k.a. Affine transform.
Transform instances are immutable: all transforming methods, eg. Transform instances are immutable: all transforming methods, eg.
rotate(), return a new Transform instance. rotate(), return a new Transform instance.
:Example: :Example:
>>> t = Transform() >>> t = Transform()
>>> t >>> t
<Transform [1 0 0 1 0 0]> <Transform [1 0 0 1 0 0]>
>>> t.scale(2) >>> t.scale(2)
<Transform [2 0 0 2 0 0]> <Transform [2 0 0 2 0 0]>
>>> t.scale(2.5, 5.5) >>> t.scale(2.5, 5.5)
<Transform [2.5 0 0 5.5 0 0]> <Transform [2.5 0 0 5.5 0 0]>
>>> >>>
>>> t.scale(2, 3).transformPoint((100, 100)) >>> t.scale(2, 3).transformPoint((100, 100))
(200, 300) (200, 300)
Transform's constructor takes six arguments, all of which are Transform's constructor takes six arguments, all of which are
optional, and can be used as keyword arguments:: optional, and can be used as keyword arguments::
>>> Transform(12) >>> Transform(12)
<Transform [12 0 0 1 0 0]> <Transform [12 0 0 1 0 0]>
>>> Transform(dx=12) >>> Transform(dx=12)
<Transform [1 0 0 1 12 0]> <Transform [1 0 0 1 12 0]>
>>> Transform(yx=12) >>> Transform(yx=12)
<Transform [1 0 12 1 0 0]> <Transform [1 0 12 1 0 0]>
Transform instances also behave like sequences of length 6:: Transform instances also behave like sequences of length 6::
>>> len(Identity) >>> len(Identity)
6 6
>>> list(Identity) >>> list(Identity)
[1, 0, 0, 1, 0, 0] [1, 0, 0, 1, 0, 0]
>>> tuple(Identity) >>> tuple(Identity)
(1, 0, 0, 1, 0, 0) (1, 0, 0, 1, 0, 0)
Transform instances are comparable:: Transform instances are comparable::
>>> t1 = Identity.scale(2, 3).translate(4, 6) >>> t1 = Identity.scale(2, 3).translate(4, 6)
>>> t2 = Identity.translate(8, 18).scale(2, 3) >>> t2 = Identity.translate(8, 18).scale(2, 3)
>>> t1 == t2 >>> t1 == t2
1 1
But beware of floating point rounding errors:: But beware of floating point rounding errors::
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1 >>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]> <Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2 >>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]> <Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t1 == t2 >>> t1 == t2
0 0
Transform instances are hashable, meaning you can use them as Transform instances are hashable, meaning you can use them as
keys in dictionaries:: keys in dictionaries::
>>> d = {Scale(12, 13): None} >>> d = {Scale(12, 13): None}
>>> d >>> d
{<Transform [12 0 0 13 0 0]>: None} {<Transform [12 0 0 13 0 0]>: None}
But again, beware of floating point rounding errors:: But again, beware of floating point rounding errors::
>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
>>> t1 >>> t1
<Transform [0.2 0 0 0.3 0.08 0.18]> <Transform [0.2 0 0 0.3 0.08 0.18]>
>>> t2 >>> t2
<Transform [0.2 0 0 0.3 0.08 0.18]> <Transform [0.2 0 0 0.3 0.08 0.18]>
>>> d = {t1: None} >>> d = {t1: None}
>>> d >>> d
{<Transform [0.2 0 0 0.3 0.08 0.18]>: None} {<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
>>> d[t2] >>> d[t2]
Traceback (most recent call last): Traceback (most recent call last):
File "<stdin>", line 1, in ? File "<stdin>", line 1, in ?
KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]> KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
""" """
xx: float = 1 xx: float = 1
xy: float = 0 xy: float = 0
yx: float = 0 yx: float = 0
yy: float = 1 yy: float = 1
dx: float = 0 dx: float = 0
dy: float = 0 dy: float = 0
def transformPoint(self, p): def transformPoint(self, p):
"""Transform a point. """Transform a point.
:Example: :Example:
>>> t = Transform() >>> t = Transform()
>>> t = t.scale(2.5, 5.5) >>> t = t.scale(2.5, 5.5)
>>> t.transformPoint((100, 100)) >>> t.transformPoint((100, 100))
(250.0, 550.0) (250.0, 550.0)
""" """
(x, y) = p (x, y) = p
xx, xy, yx, yy, dx, dy = self xx, xy, yx, yy, dx, dy = self
return (xx*x + yx*y + dx, xy*x + yy*y + dy) return (xx * x + yx * y + dx, xy * x + yy * y + dy)
def transformPoints(self, points): def transformPoints(self, points):
"""Transform a list of points. """Transform a list of points.
:Example: :Example:
>>> t = Scale(2, 3) >>> t = Scale(2, 3)
>>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
[(0, 0), (0, 300), (200, 300), (200, 0)] [(0, 0), (0, 300), (200, 300), (200, 0)]
>>> >>>
""" """
xx, xy, yx, yy, dx, dy = self xx, xy, yx, yy, dx, dy = self
return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points] return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points]
def transformVector(self, v): def transformVector(self, v):
"""Transform an (dx, dy) vector, treating translation as zero. """Transform an (dx, dy) vector, treating translation as zero.
:Example: :Example:
>>> t = Transform(2, 0, 0, 2, 10, 20) >>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVector((3, -4)) >>> t.transformVector((3, -4))
(6, -8) (6, -8)
>>> >>>
""" """
(dx, dy) = v (dx, dy) = v
xx, xy, yx, yy = self[:4] xx, xy, yx, yy = self[:4]
return (xx*dx + yx*dy, xy*dx + yy*dy) return (xx * dx + yx * dy, xy * dx + yy * dy)
def transformVectors(self, vectors): def transformVectors(self, vectors):
"""Transform a list of (dx, dy) vector, treating translation as zero. """Transform a list of (dx, dy) vector, treating translation as zero.
:Example: :Example:
>>> t = Transform(2, 0, 0, 2, 10, 20) >>> t = Transform(2, 0, 0, 2, 10, 20)
>>> t.transformVectors([(3, -4), (5, -6)]) >>> t.transformVectors([(3, -4), (5, -6)])
[(6, -8), (10, -12)] [(6, -8), (10, -12)]
>>> >>>
""" """
xx, xy, yx, yy = self[:4] xx, xy, yx, yy = self[:4]
return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors] return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors]
def translate(self, x=0, y=0): def translate(self, x=0, y=0):
"""Return a new transformation, translated (offset) by x, y. """Return a new transformation, translated (offset) by x, y.
:Example: :Example:
>>> t = Transform() >>> t = Transform()
>>> t.translate(20, 30) >>> t.translate(20, 30)
<Transform [1 0 0 1 20 30]> <Transform [1 0 0 1 20 30]>
>>> >>>
""" """
return self.transform((1, 0, 0, 1, x, y)) return self.transform((1, 0, 0, 1, x, y))
def scale(self, x=1, y=None): def scale(self, x=1, y=None):
"""Return a new transformation, scaled by x, y. The 'y' argument """Return a new transformation, scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well. may be None, which implies to use the x value for y as well.
:Example: :Example:
>>> t = Transform() >>> t = Transform()
>>> t.scale(5) >>> t.scale(5)
<Transform [5 0 0 5 0 0]> <Transform [5 0 0 5 0 0]>
>>> t.scale(5, 6) >>> t.scale(5, 6)
<Transform [5 0 0 6 0 0]> <Transform [5 0 0 6 0 0]>
>>> >>>
""" """
if y is None: if y is None:
y = x y = x
return self.transform((x, 0, 0, y, 0, 0)) return self.transform((x, 0, 0, y, 0, 0))
def rotate(self, angle): def rotate(self, angle):
"""Return a new transformation, rotated by 'angle' (radians). """Return a new transformation, rotated by 'angle' (radians).
:Example: :Example:
>>> import math >>> import math
>>> t = Transform() >>> t = Transform()
>>> t.rotate(math.pi / 2) >>> t.rotate(math.pi / 2)
<Transform [0 1 -1 0 0 0]> <Transform [0 1 -1 0 0 0]>
>>> >>>
""" """
import math import math
c = _normSinCos(math.cos(angle))
s = _normSinCos(math.sin(angle))
return self.transform((c, s, -s, c, 0, 0))
def skew(self, x=0, y=0): c = _normSinCos(math.cos(angle))
"""Return a new transformation, skewed by x and y. s = _normSinCos(math.sin(angle))
return self.transform((c, s, -s, c, 0, 0))
:Example: def skew(self, x=0, y=0):
>>> import math """Return a new transformation, skewed by x and y.
>>> t = Transform()
>>> t.skew(math.pi / 4)
<Transform [1 0 1 1 0 0]>
>>>
"""
import math
return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
def transform(self, other): :Example:
"""Return a new transformation, transformed by another >>> import math
transformation. >>> t = Transform()
>>> t.skew(math.pi / 4)
<Transform [1 0 1 1 0 0]>
>>>
"""
import math
:Example: return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))
>>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.transform((4, 3, 2, 1, 5, 6))
<Transform [8 9 4 3 11 24]>
>>>
"""
xx1, xy1, yx1, yy1, dx1, dy1 = other
xx2, xy2, yx2, yy2, dx2, dy2 = self
return self.__class__(
xx1*xx2 + xy1*yx2,
xx1*xy2 + xy1*yy2,
yx1*xx2 + yy1*yx2,
yx1*xy2 + yy1*yy2,
xx2*dx1 + yx2*dy1 + dx2,
xy2*dx1 + yy2*dy1 + dy2)
def reverseTransform(self, other): def transform(self, other):
"""Return a new transformation, which is the other transformation """Return a new transformation, transformed by another
transformed by self. self.reverseTransform(other) is equivalent to transformation.
other.transform(self).
:Example: :Example:
>>> t = Transform(2, 0, 0, 3, 1, 6) >>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.reverseTransform((4, 3, 2, 1, 5, 6)) >>> t.transform((4, 3, 2, 1, 5, 6))
<Transform [8 6 6 3 21 15]> <Transform [8 9 4 3 11 24]>
>>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) >>>
<Transform [8 6 6 3 21 15]> """
>>> xx1, xy1, yx1, yy1, dx1, dy1 = other
""" xx2, xy2, yx2, yy2, dx2, dy2 = self
xx1, xy1, yx1, yy1, dx1, dy1 = self return self.__class__(
xx2, xy2, yx2, yy2, dx2, dy2 = other xx1 * xx2 + xy1 * yx2,
return self.__class__( xx1 * xy2 + xy1 * yy2,
xx1*xx2 + xy1*yx2, yx1 * xx2 + yy1 * yx2,
xx1*xy2 + xy1*yy2, yx1 * xy2 + yy1 * yy2,
yx1*xx2 + yy1*yx2, xx2 * dx1 + yx2 * dy1 + dx2,
yx1*xy2 + yy1*yy2, xy2 * dx1 + yy2 * dy1 + dy2,
xx2*dx1 + yx2*dy1 + dx2, )
xy2*dx1 + yy2*dy1 + dy2)
def inverse(self): def reverseTransform(self, other):
"""Return the inverse transformation. """Return a new transformation, which is the other transformation
transformed by self. self.reverseTransform(other) is equivalent to
other.transform(self).
:Example: :Example:
>>> t = Identity.translate(2, 3).scale(4, 5) >>> t = Transform(2, 0, 0, 3, 1, 6)
>>> t.transformPoint((10, 20)) >>> t.reverseTransform((4, 3, 2, 1, 5, 6))
(42, 103) <Transform [8 6 6 3 21 15]>
>>> it = t.inverse() >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
>>> it.transformPoint((42, 103)) <Transform [8 6 6 3 21 15]>
(10.0, 20.0) >>>
>>> """
""" xx1, xy1, yx1, yy1, dx1, dy1 = self
if self == Identity: xx2, xy2, yx2, yy2, dx2, dy2 = other
return self return self.__class__(
xx, xy, yx, yy, dx, dy = self xx1 * xx2 + xy1 * yx2,
det = xx*yy - yx*xy xx1 * xy2 + xy1 * yy2,
xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det yx1 * xx2 + yy1 * yx2,
dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy yx1 * xy2 + yy1 * yy2,
return self.__class__(xx, xy, yx, yy, dx, dy) xx2 * dx1 + yx2 * dy1 + dx2,
xy2 * dx1 + yy2 * dy1 + dy2,
)
def toPS(self): def inverse(self):
"""Return a PostScript representation """Return the inverse transformation.
:Example: :Example:
>>> t = Identity.translate(2, 3).scale(4, 5)
>>> t.transformPoint((10, 20))
(42, 103)
>>> it = t.inverse()
>>> it.transformPoint((42, 103))
(10.0, 20.0)
>>>
"""
if self == Identity:
return self
xx, xy, yx, yy, dx, dy = self
det = xx * yy - yx * xy
xx, xy, yx, yy = yy / det, -xy / det, -yx / det, xx / det
dx, dy = -xx * dx - yx * dy, -xy * dx - yy * dy
return self.__class__(xx, xy, yx, yy, dx, dy)
>>> t = Identity.scale(2, 3).translate(4, 5) def toPS(self):
>>> t.toPS() """Return a PostScript representation
'[2 0 0 3 8 15]'
>>>
"""
return "[%s %s %s %s %s %s]" % self
def __bool__(self): :Example:
"""Returns True if transform is not identity, False otherwise.
:Example: >>> t = Identity.scale(2, 3).translate(4, 5)
>>> t.toPS()
'[2 0 0 3 8 15]'
>>>
"""
return "[%s %s %s %s %s %s]" % self
>>> bool(Identity) def toDecomposed(self) -> "DecomposedTransform":
False """Decompose into a DecomposedTransform."""
>>> bool(Transform()) return DecomposedTransform.fromTransform(self)
False
>>> bool(Scale(1.))
False
>>> bool(Scale(2))
True
>>> bool(Offset())
False
>>> bool(Offset(0))
False
>>> bool(Offset(2))
True
"""
return self != Identity
def __repr__(self): def __bool__(self):
return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self) """Returns True if transform is not identity, False otherwise.
:Example:
>>> bool(Identity)
False
>>> bool(Transform())
False
>>> bool(Scale(1.))
False
>>> bool(Scale(2))
True
>>> bool(Offset())
False
>>> bool(Offset(0))
False
>>> bool(Offset(2))
True
"""
return self != Identity
def __repr__(self):
return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
Identity = Transform() Identity = Transform()
def Offset(x=0, y=0):
"""Return the identity transformation offset by x, y.
:Example: def Offset(x=0, y=0):
>>> Offset(2, 3) """Return the identity transformation offset by x, y.
<Transform [1 0 0 1 2 3]>
>>> :Example:
""" >>> Offset(2, 3)
return Transform(1, 0, 0, 1, x, y) <Transform [1 0 0 1 2 3]>
>>>
"""
return Transform(1, 0, 0, 1, x, y)
def Scale(x, y=None): def Scale(x, y=None):
"""Return the identity transformation scaled by x, y. The 'y' argument """Return the identity transformation scaled by x, y. The 'y' argument
may be None, which implies to use the x value for y as well. may be None, which implies to use the x value for y as well.
:Example: :Example:
>>> Scale(2, 3) >>> Scale(2, 3)
<Transform [2 0 0 3 0 0]> <Transform [2 0 0 3 0 0]>
>>> >>>
""" """
if y is None: if y is None:
y = x y = x
return Transform(x, 0, 0, y, 0, 0) return Transform(x, 0, 0, y, 0, 0)
@dataclass
class DecomposedTransform:
"""The DecomposedTransform class implements a transformation with separate
translate, rotation, scale, skew, and transformation-center components.
"""
translateX: float = 0
translateY: float = 0
rotation: float = 0 # in degrees, counter-clockwise
scaleX: float = 1
scaleY: float = 1
skewX: float = 0 # in degrees, clockwise
skewY: float = 0 # in degrees, counter-clockwise
tCenterX: float = 0
tCenterY: float = 0
@classmethod
def fromTransform(self, transform):
# Adapted from an answer on
# https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix
a, b, c, d, x, y = transform
sx = math.copysign(1, a)
if sx < 0:
a *= sx
b *= sx
delta = a * d - b * c
rotation = 0
scaleX = scaleY = 0
skewX = skewY = 0
# Apply the QR-like decomposition.
if a != 0 or b != 0:
r = math.sqrt(a * a + b * b)
rotation = math.acos(a / r) if b >= 0 else -math.acos(a / r)
scaleX, scaleY = (r, delta / r)
skewX, skewY = (math.atan((a * c + b * d) / (r * r)), 0)
elif c != 0 or d != 0:
s = math.sqrt(c * c + d * d)
rotation = math.pi / 2 - (
math.acos(-c / s) if d >= 0 else -math.acos(c / s)
)
scaleX, scaleY = (delta / s, s)
skewX, skewY = (0, math.atan((a * c + b * d) / (s * s)))
else:
# a = b = c = d = 0
pass
return DecomposedTransform(
x,
y,
math.degrees(rotation),
scaleX * sx,
scaleY,
math.degrees(skewX) * sx,
math.degrees(skewY),
0,
0,
)
def toTransform(self):
"""Return the Transform() equivalent of this transformation.
:Example:
>>> DecomposedTransform(scaleX=2, scaleY=2).toTransform()
<Transform [2 0 0 2 0 0]>
>>>
"""
t = Transform()
t = t.translate(
self.translateX + self.tCenterX, self.translateY + self.tCenterY
)
t = t.rotate(math.radians(self.rotation))
t = t.scale(self.scaleX, self.scaleY)
t = t.skew(math.radians(self.skewX), math.radians(self.skewY))
t = t.translate(-self.tCenterX, -self.tCenterY)
return t
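The two classes are designed to round-trip: fromTransform() recovers rotation, scale and skew from a matrix, and toTransform() rebuilds the matrix. A hedged sketch with arbitrary values, relying only on the API added in this diff:

import math
from fontTools.misc.transform import Transform, DecomposedTransform

t = Transform().rotate(math.radians(30)).scale(2, 3)
d = t.toDecomposed()                 # same as DecomposedTransform.fromTransform(t)
print(round(d.rotation, 3), round(d.scaleX, 3), round(d.scaleY, 3))  # 30.0 2.0 3.0
t2 = d.toTransform()                 # approximately equal to t again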
if __name__ == "__main__":
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
@ -8,164 +8,169 @@ import logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
class TTXParseError(Exception):
    pass
BUFSIZE = 0x4000 BUFSIZE = 0x4000
class XMLReader(object):
    def __init__(
        self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
    ):
        if fileOrPath == "-":
            fileOrPath = sys.stdin
        if not hasattr(fileOrPath, "read"):
            self.file = open(fileOrPath, "rb")
            self._closeStream = True
        else:
            # assume readable file object
            self.file = fileOrPath
            self._closeStream = False
        self.ttFont = ttFont
        self.progress = progress
        if quiet is not None:
            from fontTools.misc.loggingTools import deprecateArgument

            deprecateArgument("quiet", "configure logging instead")
        self.quiet = quiet
        self.root = None
        self.contentStack = []
        self.contentOnly = contentOnly
        self.stackSize = 0
def read(self, rootless=False): def read(self, rootless=False):
if rootless: if rootless:
self.stackSize += 1 self.stackSize += 1
if self.progress: if self.progress:
self.file.seek(0, 2) self.file.seek(0, 2)
fileSize = self.file.tell() fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1) self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0) self.file.seek(0)
self._parseFile(self.file) self._parseFile(self.file)
if self._closeStream: if self._closeStream:
self.close() self.close()
if rootless: if rootless:
self.stackSize -= 1 self.stackSize -= 1
def close(self): def close(self):
self.file.close() self.file.close()
def _parseFile(self, file): def _parseFile(self, file):
from xml.parsers.expat import ParserCreate from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
pos = 0 parser = ParserCreate()
while True: parser.StartElementHandler = self._startElementHandler
chunk = file.read(BUFSIZE) parser.EndElementHandler = self._endElementHandler
if not chunk: parser.CharacterDataHandler = self._characterDataHandler
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _startElementHandler(self, name, attrs): pos = 0
if self.stackSize == 1 and self.contentOnly: while True:
# We already know the table we're parsing, skip chunk = file.read(BUFSIZE)
# parsing the table tag and continue to if not chunk:
# stack '2' which begins parsing content parser.Parse(chunk, 1)
self.contentStack.append([]) break
self.stackSize = 2 pos = pos + len(chunk)
return if self.progress:
stackSize = self.stackSize self.progress.set(pos // 100)
self.stackSize = stackSize + 1 parser.Parse(chunk, 0)
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
if self.ttFont.reader is None and not self.ttFont.tables:
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2 and subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
subReader.read()
self.contentStack.append([])
self.root = subReader.root
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _characterDataHandler(self, data): def _startElementHandler(self, name, attrs):
if self.stackSize > 1: if self.stackSize == 1 and self.contentOnly:
self.contentStack[-1].append(data) # We already know the table we're parsing, skip
# parsing the table tag and continue to
# stack '2' which begins parsing content
self.contentStack.append([])
self.stackSize = 2
return
stackSize = self.stackSize
self.stackSize = stackSize + 1
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, "name"):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
if self.ttFont.reader is None and not self.ttFont.tables:
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == "loca" and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2 and subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
subReader.read()
self.contentStack.append([])
self.root = subReader.root
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _endElementHandler(self, name): def _characterDataHandler(self, data):
self.stackSize = self.stackSize - 1 if self.stackSize > 1:
del self.contentStack[-1] self.contentStack[-1].append(data)
if not self.contentOnly:
if self.stackSize == 1: def _endElementHandler(self, name):
self.root = None self.stackSize = self.stackSize - 1
elif self.stackSize == 2: del self.contentStack[-1]
name, attrs, content = self.root if not self.contentOnly:
self.currentTable.fromXML(name, attrs, content, self.ttFont) if self.stackSize == 1:
self.root = None self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
class ProgressPrinter(object):
    def __init__(self, title, maxval=100):
        print(title)

    def set(self, val, maxval=None):
        pass

    def increment(self, val=1):
        pass

    def setLabel(self, text):
        print(text)
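XMLReader is the engine behind TTFont.importXML() and the ttx compiler: it streams the TTX file through expat in BUFSIZE chunks and hands each table's element tree to that table's fromXML(). A hedged sketch of driving it directly; the file names are placeholders:

from fontTools.ttLib import TTFont
from fontTools.misc.xmlReader import XMLReader

font = TTFont()
XMLReader("MyFont.ttx", font).read()    # fills `font` from the TTX dump
font.save("MyFont-rebuilt.ttf")         # hypothetical output path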
@ -9,186 +9,196 @@ INDENT = " "
class XMLWriter(object): class XMLWriter(object):
def __init__(
self,
fileOrPath,
indentwhite=INDENT,
idlefunc=None,
encoding="utf_8",
newlinestr="\n",
):
if encoding.lower().replace("-", "").replace("_", "") != "utf8":
raise Exception("Only UTF-8 encoding is supported.")
if fileOrPath == "-":
fileOrPath = sys.stdout
if not hasattr(fileOrPath, "write"):
self.filename = fileOrPath
self.file = open(fileOrPath, "wb")
self._closeStream = True
else:
self.filename = None
# assume writable file object
self.file = fileOrPath
self._closeStream = False
def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8", # Figure out if writer expects bytes or unicodes
newlinestr="\n"): try:
if encoding.lower().replace('-','').replace('_','') != 'utf8': # The bytes check should be first. See:
raise Exception('Only UTF-8 encoding is supported.') # https://github.com/fonttools/fonttools/pull/233
if fileOrPath == '-': self.file.write(b"")
fileOrPath = sys.stdout self.totype = tobytes
if not hasattr(fileOrPath, "write"): except TypeError:
self.filename = fileOrPath # This better not fail.
self.file = open(fileOrPath, "wb") self.file.write("")
self._closeStream = True self.totype = tostr
else: self.indentwhite = self.totype(indentwhite)
self.filename = None if newlinestr is None:
# assume writable file object self.newlinestr = self.totype(os.linesep)
self.file = fileOrPath else:
self._closeStream = False self.newlinestr = self.totype(newlinestr)
self.indentlevel = 0
self.stack = []
self.needindent = 1
self.idlefunc = idlefunc
self.idlecounter = 0
self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
self.newline()
# Figure out if writer expects bytes or unicodes def __enter__(self):
try: return self
# The bytes check should be first. See:
# https://github.com/fonttools/fonttools/pull/233
self.file.write(b'')
self.totype = tobytes
except TypeError:
# This better not fail.
self.file.write('')
self.totype = tostr
self.indentwhite = self.totype(indentwhite)
if newlinestr is None:
self.newlinestr = self.totype(os.linesep)
else:
self.newlinestr = self.totype(newlinestr)
self.indentlevel = 0
self.stack = []
self.needindent = 1
self.idlefunc = idlefunc
self.idlecounter = 0
self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
self.newline()
def __enter__(self): def __exit__(self, exception_type, exception_value, traceback):
return self self.close()
def __exit__(self, exception_type, exception_value, traceback): def close(self):
self.close() if self._closeStream:
self.file.close()
def close(self): def write(self, string, indent=True):
if self._closeStream: """Writes text."""
self.file.close() self._writeraw(escape(string), indent=indent)
def write(self, string, indent=True): def writecdata(self, string):
"""Writes text.""" """Writes text in a CDATA section."""
self._writeraw(escape(string), indent=indent) self._writeraw("<![CDATA[" + string + "]]>")
def writecdata(self, string): def write8bit(self, data, strip=False):
"""Writes text in a CDATA section.""" """Writes a bytes() sequence into the XML, escaping
self._writeraw("<![CDATA[" + string + "]]>") non-ASCII bytes. When this is read in xmlReader,
the original bytes can be recovered by encoding to
'latin-1'."""
self._writeraw(escape8bit(data), strip=strip)
def write8bit(self, data, strip=False): def write_noindent(self, string):
"""Writes a bytes() sequence into the XML, escaping """Writes text without indentation."""
non-ASCII bytes. When this is read in xmlReader, self._writeraw(escape(string), indent=False)
the original bytes can be recovered by encoding to
'latin-1'."""
self._writeraw(escape8bit(data), strip=strip)
def write_noindent(self, string): def _writeraw(self, data, indent=True, strip=False):
"""Writes text without indentation.""" """Writes bytes, possibly indented."""
self._writeraw(escape(string), indent=False) if indent and self.needindent:
self.file.write(self.indentlevel * self.indentwhite)
self.needindent = 0
s = self.totype(data, encoding="utf_8")
if strip:
s = s.strip()
self.file.write(s)
def _writeraw(self, data, indent=True, strip=False): def newline(self):
"""Writes bytes, possibly indented.""" self.file.write(self.newlinestr)
if indent and self.needindent: self.needindent = 1
self.file.write(self.indentlevel * self.indentwhite) idlecounter = self.idlecounter
self.needindent = 0 if not idlecounter % 100 and self.idlefunc is not None:
s = self.totype(data, encoding="utf_8") self.idlefunc()
if (strip): self.idlecounter = idlecounter + 1
s = s.strip()
self.file.write(s)
def newline(self): def comment(self, data):
self.file.write(self.newlinestr) data = escape(data)
self.needindent = 1 lines = data.split("\n")
idlecounter = self.idlecounter self._writeraw("<!-- " + lines[0])
if not idlecounter % 100 and self.idlefunc is not None: for line in lines[1:]:
self.idlefunc() self.newline()
self.idlecounter = idlecounter + 1 self._writeraw(" " + line)
self._writeraw(" -->")
def comment(self, data): def simpletag(self, _TAG_, *args, **kwargs):
data = escape(data) attrdata = self.stringifyattrs(*args, **kwargs)
lines = data.split("\n") data = "<%s%s/>" % (_TAG_, attrdata)
self._writeraw("<!-- " + lines[0]) self._writeraw(data)
for line in lines[1:]:
self.newline()
self._writeraw(" " + line)
self._writeraw(" -->")
def simpletag(self, _TAG_, *args, **kwargs): def begintag(self, _TAG_, *args, **kwargs):
attrdata = self.stringifyattrs(*args, **kwargs) attrdata = self.stringifyattrs(*args, **kwargs)
data = "<%s%s/>" % (_TAG_, attrdata) data = "<%s%s>" % (_TAG_, attrdata)
self._writeraw(data) self._writeraw(data)
self.stack.append(_TAG_)
self.indent()
def begintag(self, _TAG_, *args, **kwargs): def endtag(self, _TAG_):
attrdata = self.stringifyattrs(*args, **kwargs) assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
data = "<%s%s>" % (_TAG_, attrdata) del self.stack[-1]
self._writeraw(data) self.dedent()
self.stack.append(_TAG_) data = "</%s>" % _TAG_
self.indent() self._writeraw(data)
def endtag(self, _TAG_): def dumphex(self, data):
assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" linelength = 16
del self.stack[-1] hexlinelength = linelength * 2
self.dedent() chunksize = 8
data = "</%s>" % _TAG_ for i in range(0, len(data), linelength):
self._writeraw(data) hexline = hexStr(data[i : i + linelength])
line = ""
white = ""
for j in range(0, hexlinelength, chunksize):
line = line + white + hexline[j : j + chunksize]
white = " "
self._writeraw(line)
self.newline()
def dumphex(self, data): def indent(self):
linelength = 16 self.indentlevel = self.indentlevel + 1
hexlinelength = linelength * 2
chunksize = 8
for i in range(0, len(data), linelength):
hexline = hexStr(data[i:i+linelength])
line = ""
white = ""
for j in range(0, hexlinelength, chunksize):
line = line + white + hexline[j:j+chunksize]
white = " "
self._writeraw(line)
self.newline()
def indent(self): def dedent(self):
self.indentlevel = self.indentlevel + 1 assert self.indentlevel > 0
self.indentlevel = self.indentlevel - 1
def dedent(self): def stringifyattrs(self, *args, **kwargs):
assert self.indentlevel > 0 if kwargs:
self.indentlevel = self.indentlevel - 1 assert not args
attributes = sorted(kwargs.items())
def stringifyattrs(self, *args, **kwargs): elif args:
if kwargs: assert len(args) == 1
assert not args attributes = args[0]
attributes = sorted(kwargs.items()) else:
elif args: return ""
assert len(args) == 1 data = ""
attributes = args[0] for attr, value in attributes:
else: if not isinstance(value, (bytes, str)):
return "" value = str(value)
data = "" data = data + ' %s="%s"' % (attr, escapeattr(value))
for attr, value in attributes: return data
if not isinstance(value, (bytes, str)):
value = str(value)
data = data + ' %s="%s"' % (attr, escapeattr(value))
return data
def escape(data):
    data = tostr(data, "utf_8")
    data = data.replace("&", "&amp;")
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    data = data.replace("\r", "&#13;")
    return data


def escapeattr(data):
    data = escape(data)
    data = data.replace('"', "&quot;")
    return data


def escape8bit(data):
    """Input is Unicode string."""

    def escapechar(c):
        n = ord(c)
        if 32 <= n <= 127 and c not in "<&>":
            return c
        else:
            return "&#" + repr(n) + ";"

    return strjoin(map(escapechar, data.decode("latin-1")))


def hexStr(s):
    h = string.hexdigits
    r = ""
    for c in s:
        i = byteord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r
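XMLWriter is the counterpart used for all TTX output. A hedged sketch writing a tiny element tree to an in-memory buffer; the tag and attribute names are invented for the example:

from io import BytesIO
from fontTools.misc.xmlWriter import XMLWriter

buf = BytesIO()
writer = XMLWriter(buf, newlinestr="\n")   # the XML declaration is written here
writer.begintag("demo", version="1.0")
writer.newline()
writer.simpletag("value", name="example")
writer.newline()
writer.endtag("demo")
writer.newline()
print(buf.getvalue().decode("utf-8"))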
File diff suppressed because it is too large.
@ -1,5 +1,5 @@
import sys
from fontTools.mtiLib import main

if __name__ == "__main__":
    sys.exit(main())
@ -764,7 +764,7 @@ class ChainContextSubstBuilder(ChainContextualBuilder):
result.setdefault(glyph, set()).update(replacements) result.setdefault(glyph, set()).update(replacements)
return result return result
def find_chainable_single_subst(self, glyphs): def find_chainable_single_subst(self, mapping):
"""Helper for add_single_subst_chained_()""" """Helper for add_single_subst_chained_()"""
res = None res = None
for rule in self.rules[::-1]: for rule in self.rules[::-1]:
@ -772,7 +772,7 @@ class ChainContextSubstBuilder(ChainContextualBuilder):
return res return res
for sub in rule.lookups: for sub in rule.lookups:
if isinstance(sub, SingleSubstBuilder) and not any( if isinstance(sub, SingleSubstBuilder) and not any(
g in glyphs for g in sub.mapping.keys() g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping
): ):
res = sub res = sub
return res return res
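The behavioural change in this hunk: a SingleSubst lookup is now rejected as a chaining target only when it would map the same input glyph to a different output glyph, rather than whenever the input glyphs merely overlap. A hedged sketch of the new predicate on plain dicts (names are illustrative):

def conflicts(new_mapping, existing_mapping):
    # mirrors: any(g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping)
    return any(
        g in new_mapping and new_mapping[g] != existing_mapping[g]
        for g in existing_mapping
    )

print(conflicts({"a": "a.alt"}, {"a": "a.alt"}))  # False: identical rule, lookup reusable
print(conflicts({"a": "a.alt"}, {"a": "a.sc"}))   # True: same glyph, different target
print(conflicts({"a": "a.alt"}, {"b": "b.sc"}))   # False: disjoint mappings, reusable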
@ -2,5 +2,5 @@ import sys
from fontTools.otlLib.optimize import main

if __name__ == "__main__":
    sys.exit(main())
@ -7,51 +7,46 @@ __all__ = ["AreaPen"]
class AreaPen(BasePen):
    def __init__(self, glyphset=None):
        BasePen.__init__(self, glyphset)
        self.value = 0

    def _moveTo(self, p0):
        self._p0 = self._startPoint = p0

    def _lineTo(self, p1):
        x0, y0 = self._p0
        x1, y1 = p1
        self.value -= (x1 - x0) * (y1 + y0) * 0.5
        self._p0 = p1

    def _qCurveToOne(self, p1, p2):
        # https://github.com/Pomax/bezierinfo/issues/44
        p0 = self._p0
        x0, y0 = p0[0], p0[1]
        x1, y1 = p1[0] - x0, p1[1] - y0
        x2, y2 = p2[0] - x0, p2[1] - y0
        self.value -= (x2 * y1 - x1 * y2) / 3
        self._lineTo(p2)
        self._p0 = p2

    def _curveToOne(self, p1, p2, p3):
        # https://github.com/Pomax/bezierinfo/issues/44
        p0 = self._p0
        x0, y0 = p0[0], p0[1]
        x1, y1 = p1[0] - x0, p1[1] - y0
        x2, y2 = p2[0] - x0, p2[1] - y0
        x3, y3 = p3[0] - x0, p3[1] - y0
        self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15
        self._lineTo(p3)
        self._p0 = p3

    def _closePath(self):
        self._lineTo(self._startPoint)
        del self._p0, self._startPoint

    def _endPath(self):
        if self._p0 != self._startPoint:
            # Area is not defined for open contours.
            raise NotImplementedError
        del self._p0, self._startPoint
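AreaPen accumulates the signed area term by term: straight segments contribute the shoelace/trapezoid expression and the curve handlers add the quadratic and cubic corrections cited above. A hedged usage sketch with a simple triangle:

from fontTools.pens.areaPen import AreaPen

pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.lineTo((0, 10))
pen.closePath()
print(pen.value)   # 50.0 for this counter-clockwise triangle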
@ -36,376 +36,409 @@ Coordinates are usually expressed as (x, y) tuples, but generally any
sequence of length 2 will do. sequence of length 2 will do.
""" """
from typing import Tuple, Dict
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.transform import DecomposedTransform

__all__ = [
    "AbstractPen",
    "NullPen",
    "BasePen",
    "PenError",
    "decomposeSuperBezierSegment",
    "decomposeQuadraticSegment",
]
class PenError(Exception):
    """Represents an error during penning."""


class OpenContourError(PenError):
    pass
class AbstractPen: class AbstractPen:
def moveTo(self, pt: Tuple[float, float]) -> None:
"""Begin a new sub path, set the current point to 'pt'. You must
end each sub path with a call to pen.closePath() or pen.endPath().
"""
raise NotImplementedError
def moveTo(self, pt: Tuple[float, float]) -> None: def lineTo(self, pt: Tuple[float, float]) -> None:
"""Begin a new sub path, set the current point to 'pt'. You must """Draw a straight line from the current point to 'pt'."""
end each sub path with a call to pen.closePath() or pen.endPath(). raise NotImplementedError
"""
raise NotImplementedError
def lineTo(self, pt: Tuple[float, float]) -> None: def curveTo(self, *points: Tuple[float, float]) -> None:
"""Draw a straight line from the current point to 'pt'.""" """Draw a cubic bezier with an arbitrary number of control points.
raise NotImplementedError
def curveTo(self, *points: Tuple[float, float]) -> None: The last point specified is on-curve, all others are off-curve
"""Draw a cubic bezier with an arbitrary number of control points. (control) points. If the number of control points is > 2, the
segment is split into multiple bezier segments. This works
like this:
The last point specified is on-curve, all others are off-curve Let n be the number of control points (which is the number of
(control) points. If the number of control points is > 2, the arguments to this call minus 1). If n==2, a plain vanilla cubic
segment is split into multiple bezier segments. This works bezier is drawn. If n==1, we fall back to a quadratic segment and
like this: if n==0 we draw a straight line. It gets interesting when n>2:
n-1 PostScript-style cubic segments will be drawn as if it were
one curve. See decomposeSuperBezierSegment().
Let n be the number of control points (which is the number of The conversion algorithm used for n>2 is inspired by NURB
arguments to this call minus 1). If n==2, a plain vanilla cubic splines, and is conceptually equivalent to the TrueType "implied
bezier is drawn. If n==1, we fall back to a quadratic segment and points" principle. See also decomposeQuadraticSegment().
if n==0 we draw a straight line. It gets interesting when n>2: """
n-1 PostScript-style cubic segments will be drawn as if it were raise NotImplementedError
one curve. See decomposeSuperBezierSegment().
The conversion algorithm used for n>2 is inspired by NURB def qCurveTo(self, *points: Tuple[float, float]) -> None:
splines, and is conceptually equivalent to the TrueType "implied """Draw a whole string of quadratic curve segments.
points" principle. See also decomposeQuadraticSegment().
"""
raise NotImplementedError
def qCurveTo(self, *points: Tuple[float, float]) -> None: The last point specified is on-curve, all others are off-curve
"""Draw a whole string of quadratic curve segments. points.
The last point specified is on-curve, all others are off-curve This method implements TrueType-style curves, breaking up curves
points. using 'implied points': between each two consecutive off-curve points,
there is one implied point exactly in the middle between them. See
also decomposeQuadraticSegment().
This method implements TrueType-style curves, breaking up curves The last argument (normally the on-curve point) may be None.
using 'implied points': between each two consecutive off-curve points, This is to support contours that have NO on-curve points (a rarely
there is one implied point exactly in the middle between them. See seen feature of TrueType outlines).
also decomposeQuadraticSegment(). """
raise NotImplementedError
The last argument (normally the on-curve point) may be None. def closePath(self) -> None:
This is to support contours that have NO on-curve points (a rarely """Close the current sub path. You must call either pen.closePath()
seen feature of TrueType outlines). or pen.endPath() after each sub path.
""" """
raise NotImplementedError pass
def closePath(self) -> None: def endPath(self) -> None:
"""Close the current sub path. You must call either pen.closePath() """End the current sub path, but don't close it. You must call
or pen.endPath() after each sub path. either pen.closePath() or pen.endPath() after each sub path.
""" """
pass pass
def endPath(self) -> None: def addComponent(
"""End the current sub path, but don't close it. You must call self,
either pen.closePath() or pen.endPath() after each sub path. glyphName: str,
""" transformation: Tuple[float, float, float, float, float, float],
pass ) -> None:
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple
containing an affine transformation, or a Transform object from the
fontTools.misc.transform module. More precisely: it should be a
sequence containing 6 numbers.
"""
raise NotImplementedError
def addComponent( def addVarComponent(
self, self,
glyphName: str, glyphName: str,
transformation: Tuple[float, float, float, float, float, float] transformation: DecomposedTransform,
) -> None: location: Dict[str, float],
"""Add a sub glyph. The 'transformation' argument must be a 6-tuple ) -> None:
containing an affine transformation, or a Transform object from the """Add a VarComponent sub glyph. The 'transformation' argument
fontTools.misc.transform module. More precisely: it should be a must be a DecomposedTransform from the fontTools.misc.transform module,
sequence containing 6 numbers. and the 'location' argument must be a dictionary mapping axis tags
""" to their locations.
raise NotImplementedError """
# GlyphSet decomposes for us
raise AttributeError
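A minimal sketch of how the segment-pen protocol above is driven in practice (RecordingPen is used here only for illustration; it simply records each call):

from fontTools.pens.recordingPen import RecordingPen

pen = RecordingPen()
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.qCurveTo((50, 150), (100, 100))
pen.closePath()
print(pen.value)
# [('moveTo', ((0, 0),)), ('lineTo', ((0, 100),)),
#  ('qCurveTo', ((50, 150), (100, 100))), ('closePath', ())]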
class NullPen(AbstractPen): class NullPen(AbstractPen):
"""A pen that does nothing. """A pen that does nothing."""
"""
def moveTo(self, pt): def moveTo(self, pt):
pass pass
def lineTo(self, pt): def lineTo(self, pt):
pass pass
def curveTo(self, *points): def curveTo(self, *points):
pass pass
def qCurveTo(self, *points): def qCurveTo(self, *points):
pass pass
def closePath(self): def closePath(self):
pass pass
def endPath(self): def endPath(self):
pass pass
def addComponent(self, glyphName, transformation): def addComponent(self, glyphName, transformation):
pass pass
def addVarComponent(self, glyphName, transformation, location):
pass
class LoggingPen(LogMixin, AbstractPen): class LoggingPen(LogMixin, AbstractPen):
"""A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin) """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""
"""
pass pass
class MissingComponentError(KeyError): class MissingComponentError(KeyError):
"""Indicates a component pointing to a non-existent glyph in the glyphset.""" """Indicates a component pointing to a non-existent glyph in the glyphset."""
class DecomposingPen(LoggingPen): class DecomposingPen(LoggingPen):
""" Implements a 'addComponent' method that decomposes components """Implements a 'addComponent' method that decomposes components
(i.e. draws them onto self as simple contours). (i.e. draws them onto self as simple contours).
It can also be used as a mixin class (e.g. see ContourRecordingPen). It can also be used as a mixin class (e.g. see ContourRecordingPen).
You must override moveTo, lineTo, curveTo and qCurveTo. You may You must override moveTo, lineTo, curveTo and qCurveTo. You may
additionally override closePath, endPath and addComponent. additionally override closePath, endPath and addComponent.
By default a warning message is logged when a base glyph is missing; By default a warning message is logged when a base glyph is missing;
set the class variable ``skipMissingComponents`` to False if you want set the class variable ``skipMissingComponents`` to False if you want
to raise a :class:`MissingComponentError` exception. to raise a :class:`MissingComponentError` exception.
""" """
skipMissingComponents = True skipMissingComponents = True
def __init__(self, glyphSet): def __init__(self, glyphSet):
""" Takes a single 'glyphSet' argument (dict), in which the glyphs """Takes a single 'glyphSet' argument (dict), in which the glyphs
that are referenced as components are looked up by their name. that are referenced as components are looked up by their name.
""" """
super(DecomposingPen, self).__init__() super(DecomposingPen, self).__init__()
self.glyphSet = glyphSet self.glyphSet = glyphSet
def addComponent(self, glyphName, transformation): def addComponent(self, glyphName, transformation):
""" Transform the points of the base glyph and draw it onto self. """Transform the points of the base glyph and draw it onto self."""
""" from fontTools.pens.transformPen import TransformPen
from fontTools.pens.transformPen import TransformPen
try: try:
glyph = self.glyphSet[glyphName] glyph = self.glyphSet[glyphName]
except KeyError: except KeyError:
if not self.skipMissingComponents: if not self.skipMissingComponents:
raise MissingComponentError(glyphName) raise MissingComponentError(glyphName)
self.log.warning( self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
"glyph '%s' is missing from glyphSet; skipped" % glyphName) else:
else: tPen = TransformPen(self, transformation)
tPen = TransformPen(self, transformation) glyph.draw(tPen)
glyph.draw(tPen)
def addVarComponent(self, glyphName, transformation, location):
# GlyphSet decomposes for us
raise AttributeError
class BasePen(DecomposingPen): class BasePen(DecomposingPen):
"""Base class for drawing pens. You must override _moveTo, _lineTo and """Base class for drawing pens. You must override _moveTo, _lineTo and
_curveToOne. You may additionally override _closePath, _endPath, _curveToOne. You may additionally override _closePath, _endPath,
addComponent and/or _qCurveToOne. You should not override any other addComponent, addVarComponent, and/or _qCurveToOne. You should not
methods. override any other methods.
""" """
def __init__(self, glyphSet=None): def __init__(self, glyphSet=None):
super(BasePen, self).__init__(glyphSet) super(BasePen, self).__init__(glyphSet)
self.__currentPoint = None self.__currentPoint = None
# must override # must override
def _moveTo(self, pt): def _moveTo(self, pt):
raise NotImplementedError raise NotImplementedError
def _lineTo(self, pt): def _lineTo(self, pt):
raise NotImplementedError raise NotImplementedError
def _curveToOne(self, pt1, pt2, pt3): def _curveToOne(self, pt1, pt2, pt3):
raise NotImplementedError raise NotImplementedError
# may override # may override
def _closePath(self): def _closePath(self):
pass pass
def _endPath(self): def _endPath(self):
pass pass
def _qCurveToOne(self, pt1, pt2): def _qCurveToOne(self, pt1, pt2):
"""This method implements the basic quadratic curve type. The """This method implements the basic quadratic curve type. The
default implementation delegates the work to the cubic curve default implementation delegates the work to the cubic curve
function. Optionally override with a native implementation. function. Optionally override with a native implementation.
""" """
pt0x, pt0y = self.__currentPoint pt0x, pt0y = self.__currentPoint
pt1x, pt1y = pt1 pt1x, pt1y = pt1
pt2x, pt2y = pt2 pt2x, pt2y = pt2
mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)
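The 2/3 factor used above is exact degree elevation of a quadratic to a cubic, not an approximation. A small self-contained check with arbitrary illustrative points:

def quad(p0, q, p2, t):
    mt = 1 - t
    return tuple(mt * mt * a + 2 * mt * t * b + t * t * c for a, b, c in zip(p0, q, p2))

def cubic(p0, c1, c2, p3, t):
    mt = 1 - t
    return tuple(
        mt**3 * a + 3 * mt * mt * t * b + 3 * mt * t * t * c + t**3 * d
        for a, b, c, d in zip(p0, c1, c2, p3)
    )

p0, q, p2 = (0, 0), (50, 100), (100, 0)
c1 = tuple(a + 2 / 3 * (b - a) for a, b in zip(p0, q))  # p0 + 2/3 * (q - p0)
c2 = tuple(a + 2 / 3 * (b - a) for a, b in zip(p2, q))  # p2 + 2/3 * (q - p2)
for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    assert all(abs(u - v) < 1e-9 for u, v in zip(quad(p0, q, p2, t), cubic(p0, c1, c2, p2, t)))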
# don't override # don't override
def _getCurrentPoint(self): def _getCurrentPoint(self):
"""Return the current point. This is not part of the public """Return the current point. This is not part of the public
interface, yet is useful for subclasses. interface, yet is useful for subclasses.
""" """
return self.__currentPoint return self.__currentPoint
def closePath(self): def closePath(self):
self._closePath() self._closePath()
self.__currentPoint = None self.__currentPoint = None
def endPath(self): def endPath(self):
self._endPath() self._endPath()
self.__currentPoint = None self.__currentPoint = None
def moveTo(self, pt): def moveTo(self, pt):
self._moveTo(pt) self._moveTo(pt)
self.__currentPoint = pt self.__currentPoint = pt
def lineTo(self, pt): def lineTo(self, pt):
self._lineTo(pt) self._lineTo(pt)
self.__currentPoint = pt self.__currentPoint = pt
def curveTo(self, *points): def curveTo(self, *points):
n = len(points) - 1 # 'n' is the number of control points n = len(points) - 1 # 'n' is the number of control points
assert n >= 0 assert n >= 0
if n == 2: if n == 2:
# The common case, we have exactly two BCP's, so this is a standard # The common case, we have exactly two BCP's, so this is a standard
# cubic bezier. Even though decomposeSuperBezierSegment() handles # cubic bezier. Even though decomposeSuperBezierSegment() handles
# this case just fine, we special-case it anyway since it's so # this case just fine, we special-case it anyway since it's so
# common. # common.
self._curveToOne(*points) self._curveToOne(*points)
self.__currentPoint = points[-1] self.__currentPoint = points[-1]
elif n > 2: elif n > 2:
# n is the number of control points; split curve into n-1 cubic # n is the number of control points; split curve into n-1 cubic
# bezier segments. The algorithm used here is inspired by NURB # bezier segments. The algorithm used here is inspired by NURB
# splines and the TrueType "implied point" principle, and ensures # splines and the TrueType "implied point" principle, and ensures
# the smoothest possible connection between two curve segments, # the smoothest possible connection between two curve segments,
# with no disruption in the curvature. It is practical since it # with no disruption in the curvature. It is practical since it
# allows one to construct multiple bezier segments with a much # allows one to construct multiple bezier segments with a much
# smaller amount of points. # smaller amount of points.
_curveToOne = self._curveToOne _curveToOne = self._curveToOne
for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
_curveToOne(pt1, pt2, pt3) _curveToOne(pt1, pt2, pt3)
self.__currentPoint = pt3 self.__currentPoint = pt3
elif n == 1: elif n == 1:
self.qCurveTo(*points) self.qCurveTo(*points)
elif n == 0: elif n == 0:
self.lineTo(points[0]) self.lineTo(points[0])
else: else:
raise AssertionError("can't get there from here") raise AssertionError("can't get there from here")
def qCurveTo(self, *points): def qCurveTo(self, *points):
n = len(points) - 1 # 'n' is the number of control points n = len(points) - 1 # 'n' is the number of control points
assert n >= 0 assert n >= 0
if points[-1] is None: if points[-1] is None:
# Special case for TrueType quadratics: it is possible to # Special case for TrueType quadratics: it is possible to
# define a contour with NO on-curve points. BasePen supports # define a contour with NO on-curve points. BasePen supports
# this by allowing the final argument (the expected on-curve # this by allowing the final argument (the expected on-curve
# point) to be None. We simulate the feature by making the implied # point) to be None. We simulate the feature by making the implied
# on-curve point between the last and the first off-curve points # on-curve point between the last and the first off-curve points
# explicit. # explicit.
x, y = points[-2] # last off-curve point x, y = points[-2] # last off-curve point
nx, ny = points[0] # first off-curve point nx, ny = points[0] # first off-curve point
impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
self.__currentPoint = impliedStartPoint self.__currentPoint = impliedStartPoint
self._moveTo(impliedStartPoint) self._moveTo(impliedStartPoint)
points = points[:-1] + (impliedStartPoint,) points = points[:-1] + (impliedStartPoint,)
if n > 0: if n > 0:
# Split the string of points into discrete quadratic curve # Split the string of points into discrete quadratic curve
# segments. Between any two consecutive off-curve points # segments. Between any two consecutive off-curve points
# there's an implied on-curve point exactly in the middle. # there's an implied on-curve point exactly in the middle.
# This is where the segment splits. # This is where the segment splits.
_qCurveToOne = self._qCurveToOne _qCurveToOne = self._qCurveToOne
for pt1, pt2 in decomposeQuadraticSegment(points): for pt1, pt2 in decomposeQuadraticSegment(points):
_qCurveToOne(pt1, pt2) _qCurveToOne(pt1, pt2)
self.__currentPoint = pt2 self.__currentPoint = pt2
else: else:
self.lineTo(points[0]) self.lineTo(points[0])
def decomposeSuperBezierSegment(points): def decomposeSuperBezierSegment(points):
"""Split the SuperBezier described by 'points' into a list of regular """Split the SuperBezier described by 'points' into a list of regular
bezier segments. The 'points' argument must be a sequence with length bezier segments. The 'points' argument must be a sequence with length
3 or greater, containing (x, y) coordinates. The last point is the 3 or greater, containing (x, y) coordinates. The last point is the
destination on-curve point, the rest of the points are off-curve points. destination on-curve point, the rest of the points are off-curve points.
The start point should not be supplied. The start point should not be supplied.
This function returns a list of (pt1, pt2, pt3) tuples, which each This function returns a list of (pt1, pt2, pt3) tuples, which each
specify a regular curveto-style bezier segment. specify a regular curveto-style bezier segment.
""" """
n = len(points) - 1 n = len(points) - 1
assert n > 1 assert n > 1
bezierSegments = [] bezierSegments = []
pt1, pt2, pt3 = points[0], None, None pt1, pt2, pt3 = points[0], None, None
for i in range(2, n+1): for i in range(2, n + 1):
# calculate points in between control points. # calculate points in between control points.
nDivisions = min(i, 3, n-i+2) nDivisions = min(i, 3, n - i + 2)
for j in range(1, nDivisions): for j in range(1, nDivisions):
factor = j / nDivisions factor = j / nDivisions
temp1 = points[i-1] temp1 = points[i - 1]
temp2 = points[i-2] temp2 = points[i - 2]
temp = (temp2[0] + factor * (temp1[0] - temp2[0]), temp = (
temp2[1] + factor * (temp1[1] - temp2[1])) temp2[0] + factor * (temp1[0] - temp2[0]),
if pt2 is None: temp2[1] + factor * (temp1[1] - temp2[1]),
pt2 = temp )
else: if pt2 is None:
pt3 = (0.5 * (pt2[0] + temp[0]), pt2 = temp
0.5 * (pt2[1] + temp[1])) else:
bezierSegments.append((pt1, pt2, pt3)) pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1]))
pt1, pt2, pt3 = temp, None, None bezierSegments.append((pt1, pt2, pt3))
bezierSegments.append((pt1, points[-2], points[-1])) pt1, pt2, pt3 = temp, None, None
return bezierSegments bezierSegments.append((pt1, points[-2], points[-1]))
return bezierSegments
def decomposeQuadraticSegment(points): def decomposeQuadraticSegment(points):
"""Split the quadratic curve segment described by 'points' into a list """Split the quadratic curve segment described by 'points' into a list
of "atomic" quadratic segments. The 'points' argument must be a sequence of "atomic" quadratic segments. The 'points' argument must be a sequence
with length 2 or greater, containing (x, y) coordinates. The last point with length 2 or greater, containing (x, y) coordinates. The last point
is the destination on-curve point, the rest of the points are off-curve is the destination on-curve point, the rest of the points are off-curve
points. The start point should not be supplied. points. The start point should not be supplied.
This function returns a list of (pt1, pt2) tuples, which each specify a This function returns a list of (pt1, pt2) tuples, which each specify a
plain quadratic bezier segment. plain quadratic bezier segment.
""" """
n = len(points) - 1 n = len(points) - 1
assert n > 0 assert n > 0
quadSegments = [] quadSegments = []
for i in range(n - 1): for i in range(n - 1):
x, y = points[i] x, y = points[i]
nx, ny = points[i+1] nx, ny = points[i + 1]
impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) impliedPt = (0.5 * (x + nx), 0.5 * (y + ny))
quadSegments.append((points[i], impliedPt)) quadSegments.append((points[i], impliedPt))
quadSegments.append((points[-2], points[-1])) quadSegments.append((points[-2], points[-1]))
return quadSegments return quadSegments
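For example (illustrative coordinates), a run of two off-curve points splits at the implied on-curve midpoint:

from fontTools.pens.basePen import decomposeQuadraticSegment

print(decomposeQuadraticSegment([(0, 0), (100, 0), (100, 100)]))
# [((0, 0), (50.0, 0.0)), ((100, 0), (100, 100))]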
class _TestPen(BasePen): class _TestPen(BasePen):
"""Test class that prints PostScript to stdout.""" """Test class that prints PostScript to stdout."""
def _moveTo(self, pt):
print("%s %s moveto" % (pt[0], pt[1])) def _moveTo(self, pt):
def _lineTo(self, pt): print("%s %s moveto" % (pt[0], pt[1]))
print("%s %s lineto" % (pt[0], pt[1]))
def _curveToOne(self, bcp1, bcp2, pt): def _lineTo(self, pt):
print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], print("%s %s lineto" % (pt[0], pt[1]))
bcp2[0], bcp2[1], pt[0], pt[1]))
def _closePath(self): def _curveToOne(self, bcp1, bcp2, pt):
print("closepath") print(
"%s %s %s %s %s %s curveto"
% (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
)
def _closePath(self):
print("closepath")
if __name__ == "__main__": if __name__ == "__main__":
pen = _TestPen(None) pen = _TestPen(None)
pen.moveTo((0, 0)) pen.moveTo((0, 0))
pen.lineTo((0, 100)) pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
pen.closePath() pen.closePath()
pen = _TestPen(None) pen = _TestPen(None)
# testing the "no on-curve point" scenario # testing the "no on-curve point" scenario
pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
pen.closePath() pen.closePath()
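A minimal BasePen subclass, sketched here to show the override points named above (the class name and counting behaviour are hypothetical, for illustration only):

from fontTools.pens.basePen import BasePen

class SegmentCountPen(BasePen):
    """Hypothetical pen that counts drawn line and curve segments."""

    def __init__(self, glyphSet=None):
        super().__init__(glyphSet)
        self.count = 0

    def _moveTo(self, pt):
        pass

    def _lineTo(self, pt):
        self.count += 1

    def _curveToOne(self, pt1, pt2, pt3):
        self.count += 1

pen = SegmentCountPen()
pen.moveTo((0, 0))
pen.curveTo((0, 50), (50, 100), (100, 100))
pen.lineTo((100, 0))
pen.closePath()
print(pen.count)  # 2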


@@ -8,91 +8,93 @@ __all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen): class ControlBoundsPen(BasePen):
"""Pen to calculate the "control bounds" of a shape. This is the """Pen to calculate the "control bounds" of a shape. This is the
bounding box of all control points, so may be larger than the bounding box of all control points, so may be larger than the
actual bounding box if there are curves that don't have points actual bounding box if there are curves that don't have points
on their extremes. on their extremes.
When the shape has been drawn, the bounds are available as the When the shape has been drawn, the bounds are available as the
``bounds`` attribute of the pen object. It's a 4-tuple:: ``bounds`` attribute of the pen object. It's a 4-tuple::
(xMin, yMin, xMax, yMax). (xMin, yMin, xMax, yMax).
If ``ignoreSinglePoints`` is True, single points are ignored. If ``ignoreSinglePoints`` is True, single points are ignored.
""" """
def __init__(self, glyphSet, ignoreSinglePoints=False): def __init__(self, glyphSet, ignoreSinglePoints=False):
BasePen.__init__(self, glyphSet) BasePen.__init__(self, glyphSet)
self.ignoreSinglePoints = ignoreSinglePoints self.ignoreSinglePoints = ignoreSinglePoints
self.init() self.init()
def init(self): def init(self):
self.bounds = None self.bounds = None
self._start = None self._start = None
def _moveTo(self, pt): def _moveTo(self, pt):
self._start = pt self._start = pt
if not self.ignoreSinglePoints: if not self.ignoreSinglePoints:
self._addMoveTo() self._addMoveTo()
def _addMoveTo(self): def _addMoveTo(self):
if self._start is None: if self._start is None:
return return
bounds = self.bounds bounds = self.bounds
if bounds: if bounds:
self.bounds = updateBounds(bounds, self._start) self.bounds = updateBounds(bounds, self._start)
else: else:
x, y = self._start x, y = self._start
self.bounds = (x, y, x, y) self.bounds = (x, y, x, y)
self._start = None self._start = None
def _lineTo(self, pt): def _lineTo(self, pt):
self._addMoveTo() self._addMoveTo()
self.bounds = updateBounds(self.bounds, pt) self.bounds = updateBounds(self.bounds, pt)
def _curveToOne(self, bcp1, bcp2, pt): def _curveToOne(self, bcp1, bcp2, pt):
self._addMoveTo() self._addMoveTo()
bounds = self.bounds bounds = self.bounds
bounds = updateBounds(bounds, bcp1) bounds = updateBounds(bounds, bcp1)
bounds = updateBounds(bounds, bcp2) bounds = updateBounds(bounds, bcp2)
bounds = updateBounds(bounds, pt) bounds = updateBounds(bounds, pt)
self.bounds = bounds self.bounds = bounds
def _qCurveToOne(self, bcp, pt): def _qCurveToOne(self, bcp, pt):
self._addMoveTo() self._addMoveTo()
bounds = self.bounds bounds = self.bounds
bounds = updateBounds(bounds, bcp) bounds = updateBounds(bounds, bcp)
bounds = updateBounds(bounds, pt) bounds = updateBounds(bounds, pt)
self.bounds = bounds self.bounds = bounds
class BoundsPen(ControlBoundsPen): class BoundsPen(ControlBoundsPen):
"""Pen to calculate the bounds of a shape. It calculates the """Pen to calculate the bounds of a shape. It calculates the
correct bounds even when the shape contains curves that don't correct bounds even when the shape contains curves that don't
have points on their extremes. This is somewhat slower to compute have points on their extremes. This is somewhat slower to compute
than the "control bounds". than the "control bounds".
When the shape has been drawn, the bounds are available as the When the shape has been drawn, the bounds are available as the
``bounds`` attribute of the pen object. It's a 4-tuple:: ``bounds`` attribute of the pen object. It's a 4-tuple::
(xMin, yMin, xMax, yMax) (xMin, yMin, xMax, yMax)
""" """
def _curveToOne(self, bcp1, bcp2, pt): def _curveToOne(self, bcp1, bcp2, pt):
self._addMoveTo() self._addMoveTo()
bounds = self.bounds bounds = self.bounds
bounds = updateBounds(bounds, pt) bounds = updateBounds(bounds, pt)
if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
bounds = unionRect(bounds, calcCubicBounds( bounds = unionRect(
self._getCurrentPoint(), bcp1, bcp2, pt)) bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
self.bounds = bounds )
self.bounds = bounds
def _qCurveToOne(self, bcp, pt): def _qCurveToOne(self, bcp, pt):
self._addMoveTo() self._addMoveTo()
bounds = self.bounds bounds = self.bounds
bounds = updateBounds(bounds, pt) bounds = updateBounds(bounds, pt)
if not pointInRect(bcp, bounds): if not pointInRect(bcp, bounds):
bounds = unionRect(bounds, calcQuadraticBounds( bounds = unionRect(
self._getCurrentPoint(), bcp, pt)) bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
self.bounds = bounds )
self.bounds = bounds
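The difference between the two pens is easiest to see on a curve whose extremum does not fall on an on-curve point (illustrative coordinates; output shown as computed by hand):

from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen

for cls in (ControlBoundsPen, BoundsPen):
    pen = cls(None)
    pen.moveTo((0, 0))
    pen.curveTo((50, 100), (100, 100), (150, 0))
    pen.closePath()
    print(cls.__name__, pen.bounds)
# ControlBoundsPen (0, 0, 150, 100)  -- includes the off-curve points
# BoundsPen (0, 0, 150, 75.0)        -- true extremum of the cubic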


@@ -5,22 +5,22 @@ __all__ = ["CocoaPen"]
class CocoaPen(BasePen): class CocoaPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from AppKit import NSBezierPath
def __init__(self, glyphSet, path=None): path = NSBezierPath.bezierPath()
BasePen.__init__(self, glyphSet) self.path = path
if path is None:
from AppKit import NSBezierPath
path = NSBezierPath.bezierPath()
self.path = path
def _moveTo(self, p): def _moveTo(self, p):
self.path.moveToPoint_(p) self.path.moveToPoint_(p)
def _lineTo(self, p): def _lineTo(self, p):
self.path.lineToPoint_(p) self.path.lineToPoint_(p)
def _curveToOne(self, p1, p2, p3): def _curveToOne(self, p1, p2, p3):
self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)
def _closePath(self): def _closePath(self):
self.path.closePath() self.path.closePath()


@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from fontTools.cu2qu import curve_to_quadratic import operator
from fontTools.pens.basePen import AbstractPen, decomposeSuperBezierSegment from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic
from fontTools.pens.basePen import decomposeSuperBezierSegment
from fontTools.pens.filterPen import FilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.pointPen import BasePointToSegmentPen from fontTools.pens.pointPen import BasePointToSegmentPen
from fontTools.pens.pointPen import ReverseContourPointPen from fontTools.pens.pointPen import ReverseContourPointPen
class Cu2QuPen(AbstractPen): class Cu2QuPen(FilterPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines """A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools SegmentPen protocol. using the FontTools SegmentPen protocol.
Args: Args:
@@ -31,114 +33,56 @@ class Cu2QuPen(AbstractPen): value equal, or close to UPEM / 1000. value equal, or close to UPEM / 1000.
value equal, or close to UPEM / 1000. value equal, or close to UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point. reverse_direction: flip the contours' direction but keep starting point.
stats: a dictionary counting the point numbers of quadratic segments. stats: a dictionary counting the point numbers of quadratic segments.
ignore_single_points: don't emit contours containing only a single point all_quadratic: if True (default), only quadratic b-splines are generated.
if False, quadratic curves or cubic curves are generated depending
NOTE: The "ignore_single_points" argument is deprecated since v1.3.0, on which one is more economical.
which dropped Robofab support. It's no longer needed to special-case
UFO2-style anchors (aka "named points") when using ufoLib >= 2.0,
as these are no longer drawn onto pens as single-point contours,
but are handled separately as anchors.
""" """
def __init__(self, other_pen, max_err, reverse_direction=False, def __init__(
stats=None, ignore_single_points=False): self,
other_pen,
max_err,
reverse_direction=False,
stats=None,
all_quadratic=True,
):
if reverse_direction: if reverse_direction:
self.pen = ReverseContourPen(other_pen) other_pen = ReverseContourPen(other_pen)
else: super().__init__(other_pen)
self.pen = other_pen
self.max_err = max_err self.max_err = max_err
self.stats = stats self.stats = stats
if ignore_single_points: self.all_quadratic = all_quadratic
import warnings
warnings.warn("ignore_single_points is deprecated and "
"will be removed in future versions",
UserWarning, stacklevel=2)
self.ignore_single_points = ignore_single_points
self.start_pt = None
self.current_pt = None
def _check_contour_is_open(self): def _convert_curve(self, pt1, pt2, pt3):
if self.current_pt is None:
raise AssertionError("moveTo is required")
def _check_contour_is_closed(self):
if self.current_pt is not None:
raise AssertionError("closePath or endPath is required")
def _add_moveTo(self):
if self.start_pt is not None:
self.pen.moveTo(self.start_pt)
self.start_pt = None
def moveTo(self, pt):
self._check_contour_is_closed()
self.start_pt = self.current_pt = pt
if not self.ignore_single_points:
self._add_moveTo()
def lineTo(self, pt):
self._check_contour_is_open()
self._add_moveTo()
self.pen.lineTo(pt)
self.current_pt = pt
def qCurveTo(self, *points):
self._check_contour_is_open()
n = len(points)
if n == 1:
self.lineTo(points[0])
elif n > 1:
self._add_moveTo()
self.pen.qCurveTo(*points)
self.current_pt = points[-1]
else:
raise AssertionError("illegal qcurve segment point count: %d" % n)
def _curve_to_quadratic(self, pt1, pt2, pt3):
curve = (self.current_pt, pt1, pt2, pt3) curve = (self.current_pt, pt1, pt2, pt3)
quadratic = curve_to_quadratic(curve, self.max_err) result = curve_to_quadratic(curve, self.max_err, self.all_quadratic)
if self.stats is not None: if self.stats is not None:
n = str(len(quadratic) - 2) n = str(len(result) - 2)
self.stats[n] = self.stats.get(n, 0) + 1 self.stats[n] = self.stats.get(n, 0) + 1
self.qCurveTo(*quadratic[1:]) if self.all_quadratic:
self.qCurveTo(*result[1:])
else:
if len(result) == 3:
self.qCurveTo(*result[1:])
else:
assert len(result) == 4
super().curveTo(*result[1:])
def curveTo(self, *points): def curveTo(self, *points):
self._check_contour_is_open()
n = len(points) n = len(points)
if n == 3: if n == 3:
# this is the most common case, so we special-case it # this is the most common case, so we special-case it
self._curve_to_quadratic(*points) self._convert_curve(*points)
elif n > 3: elif n > 3:
for segment in decomposeSuperBezierSegment(points): for segment in decomposeSuperBezierSegment(points):
self._curve_to_quadratic(*segment) self._convert_curve(*segment)
elif n == 2:
self.qCurveTo(*points)
elif n == 1:
self.lineTo(points[0])
else: else:
raise AssertionError("illegal curve segment point count: %d" % n) self.qCurveTo(*points)
def closePath(self):
self._check_contour_is_open()
if self.start_pt is None:
# if 'start_pt' is _not_ None, we are ignoring single-point paths
self.pen.closePath()
self.current_pt = self.start_pt = None
def endPath(self):
self._check_contour_is_open()
if self.start_pt is None:
self.pen.endPath()
self.current_pt = self.start_pt = None
def addComponent(self, glyphName, transformation):
self._check_contour_is_closed()
self.pen.addComponent(glyphName, transformation)
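With the FilterPen-based rewrite, typical usage looks like this (a sketch; RecordingPen as the target is just for illustration, and max_err is expressed in font units):

from fontTools.pens.cu2quPen import Cu2QuPen
from fontTools.pens.recordingPen import RecordingPen

quad = RecordingPen()
pen = Cu2QuPen(quad, max_err=1.0)            # e.g. UPEM / 1000
pen.moveTo((0, 0))
pen.curveTo((0, 50), (50, 100), (100, 100))  # cubic in, quadratic segment(s) out
pen.closePath()
print(quad.value)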
class Cu2QuPointPen(BasePointToSegmentPen): class Cu2QuPointPen(BasePointToSegmentPen):
""" A filter pen to convert cubic bezier curves to quadratic b-splines """A filter pen to convert cubic bezier curves to quadratic b-splines
using the RoboFab PointPen protocol. using the FontTools PointPen protocol.
Args: Args:
other_point_pen: another PointPen used to draw the transformed outline. other_point_pen: another PointPen used to draw the transformed outline.
@@ -147,10 +91,26 @@ class Cu2QuPointPen(BasePointToSegmentPen):
value equal, or close to UPEM / 1000. value equal, or close to UPEM / 1000.
reverse_direction: reverse the winding direction of all contours. reverse_direction: reverse the winding direction of all contours.
stats: a dictionary counting the point numbers of quadratic segments. stats: a dictionary counting the point numbers of quadratic segments.
all_quadratic: if True (default), only quadratic b-splines are generated.
if False, quadratic curves or cubic curves are generated depending
on which one is more economical.
""" """
def __init__(self, other_point_pen, max_err, reverse_direction=False, __points_required = {
stats=None): "move": (1, operator.eq),
"line": (1, operator.eq),
"qcurve": (2, operator.ge),
"curve": (3, operator.eq),
}
def __init__(
self,
other_point_pen,
max_err,
reverse_direction=False,
stats=None,
all_quadratic=True,
):
BasePointToSegmentPen.__init__(self) BasePointToSegmentPen.__init__(self)
if reverse_direction: if reverse_direction:
self.pen = ReverseContourPointPen(other_point_pen) self.pen = ReverseContourPointPen(other_point_pen)
@@ -158,6 +118,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
self.pen = other_point_pen self.pen = other_point_pen
self.max_err = max_err self.max_err = max_err
self.stats = stats self.stats = stats
self.all_quadratic = all_quadratic
def _flushContour(self, segments): def _flushContour(self, segments):
assert len(segments) >= 1 assert len(segments) >= 1
@@ -166,18 +127,21 @@ class Cu2QuPointPen(BasePointToSegmentPen):
prev_points = segments[-1][1] prev_points = segments[-1][1]
prev_on_curve = prev_points[-1][0] prev_on_curve = prev_points[-1][0]
for segment_type, points in segments: for segment_type, points in segments:
if segment_type == 'curve': if segment_type == "curve":
for sub_points in self._split_super_bezier_segments(points): for sub_points in self._split_super_bezier_segments(points):
on_curve, smooth, name, kwargs = sub_points[-1] on_curve, smooth, name, kwargs = sub_points[-1]
bcp1, bcp2 = sub_points[0][0], sub_points[1][0] bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
cubic = [prev_on_curve, bcp1, bcp2, on_curve] cubic = [prev_on_curve, bcp1, bcp2, on_curve]
quad = curve_to_quadratic(cubic, self.max_err) quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic)
if self.stats is not None: if self.stats is not None:
n = str(len(quad) - 2) n = str(len(quad) - 2)
self.stats[n] = self.stats.get(n, 0) + 1 self.stats[n] = self.stats.get(n, 0) + 1
new_points = [(pt, False, None, {}) for pt in quad[1:-1]] new_points = [(pt, False, None, {}) for pt in quad[1:-1]]
new_points.append((on_curve, smooth, name, kwargs)) new_points.append((on_curve, smooth, name, kwargs))
new_segments.append(["qcurve", new_points]) if self.all_quadratic or len(new_points) == 2:
new_segments.append(["qcurve", new_points])
else:
new_segments.append(["curve", new_points])
prev_on_curve = sub_points[-1][0] prev_on_curve = sub_points[-1][0]
else: else:
new_segments.append([segment_type, points]) new_segments.append([segment_type, points])
@@ -200,8 +164,9 @@ class Cu2QuPointPen(BasePointToSegmentPen):
# a "super" bezier; decompose it # a "super" bezier; decompose it
on_curve, smooth, name, kwargs = points[-1] on_curve, smooth, name, kwargs = points[-1]
num_sub_segments = n - 1 num_sub_segments = n - 1
for i, sub_points in enumerate(decomposeSuperBezierSegment([ for i, sub_points in enumerate(
pt for pt, _, _, _ in points])): decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
):
new_segment = [] new_segment = []
for point in sub_points[:-1]: for point in sub_points[:-1]:
new_segment.append((point, False, None, {})) new_segment.append((point, False, None, {}))
@@ -213,34 +178,32 @@ class Cu2QuPointPen(BasePointToSegmentPen):
new_segment.append((sub_points[-1], True, None, {})) new_segment.append((sub_points[-1], True, None, {}))
sub_segments.append(new_segment) sub_segments.append(new_segment)
else: else:
raise AssertionError( raise AssertionError("expected 2 control points, found: %d" % n)
"expected 2 control points, found: %d" % n)
return sub_segments return sub_segments
def _drawPoints(self, segments): def _drawPoints(self, segments):
pen = self.pen pen = self.pen
pen.beginPath() pen.beginPath()
last_offcurves = [] last_offcurves = []
points_required = self.__points_required
for i, (segment_type, points) in enumerate(segments): for i, (segment_type, points) in enumerate(segments):
if segment_type in ("move", "line"): if segment_type in points_required:
assert len(points) == 1, ( n, op = points_required[segment_type]
"illegal line segment point count: %d" % len(points)) assert op(len(points), n), (
pt, smooth, name, kwargs = points[0] f"illegal {segment_type!r} segment point count: "
pen.addPoint(pt, segment_type, smooth, name, **kwargs) f"expected {n}, got {len(points)}"
elif segment_type == "qcurve": )
assert len(points) >= 2, (
"illegal qcurve segment point count: %d" % len(points))
offcurves = points[:-1] offcurves = points[:-1]
if offcurves: if i == 0:
if i == 0: # any off-curve points preceding the first on-curve
# any off-curve points preceding the first on-curve # will be appended at the end of the contour
# will be appended at the end of the contour last_offcurves = offcurves
last_offcurves = offcurves else:
else: for (pt, smooth, name, kwargs) in offcurves:
for (pt, smooth, name, kwargs) in offcurves: pen.addPoint(pt, None, smooth, name, **kwargs)
pen.addPoint(pt, None, smooth, name, **kwargs)
pt, smooth, name, kwargs = points[-1] pt, smooth, name, kwargs = points[-1]
if pt is None: if pt is None:
assert segment_type == "qcurve"
# special quadratic contour with no on-curve points: # special quadratic contour with no on-curve points:
# we need to skip the "None" point. See also the Pen # we need to skip the "None" point. See also the Pen
# protocol's qCurveTo() method and fontTools.pens.basePen # protocol's qCurveTo() method and fontTools.pens.basePen
@@ -248,9 +211,7 @@ class Cu2QuPointPen(BasePointToSegmentPen):
else: else:
pen.addPoint(pt, segment_type, smooth, name, **kwargs) pen.addPoint(pt, segment_type, smooth, name, **kwargs)
else: else:
# 'curve' segments must have been converted to 'qcurve' by now raise AssertionError("unexpected segment type: %r" % segment_type)
raise AssertionError(
"unexpected segment type: %r" % segment_type)
for (pt, smooth, name, kwargs) in last_offcurves: for (pt, smooth, name, kwargs) in last_offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs) pen.addPoint(pt, None, smooth, name, **kwargs)
pen.endPath() pen.endPath()
@@ -258,3 +219,107 @@ class Cu2QuPointPen(BasePointToSegmentPen):
def addComponent(self, baseGlyphName, transformation): def addComponent(self, baseGlyphName, transformation):
assert self.currentPath is None assert self.currentPath is None
self.pen.addComponent(baseGlyphName, transformation) self.pen.addComponent(baseGlyphName, transformation)
class Cu2QuMultiPen:
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
in an interpolation-compatible manner, using the FontTools SegmentPen protocol.
Args:
other_pens: list of SegmentPens used to draw the transformed outlines.
max_err: maximum approximation error in font units. For optimal results,
if you know the UPEM of the font, we recommend setting this to a
value equal, or close to UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point.
This pen does not follow the normal SegmentPen protocol. Instead, its
moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are
arguments that would normally be passed to a SegmentPen, one item for
each of the pens in other_pens.
"""
# TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce
# Remove start_pts and _add_moveTo
def __init__(self, other_pens, max_err, reverse_direction=False):
if reverse_direction:
other_pens = [
ReverseContourPen(pen, outputImpliedClosingLine=True)
for pen in other_pens
]
self.pens = other_pens
self.max_err = max_err
self.start_pts = None
self.current_pts = None
def _check_contour_is_open(self):
if self.current_pts is None:
raise AssertionError("moveTo is required")
def _check_contour_is_closed(self):
if self.current_pts is not None:
raise AssertionError("closePath or endPath is required")
def _add_moveTo(self):
if self.start_pts is not None:
for pt, pen in zip(self.start_pts, self.pens):
pen.moveTo(*pt)
self.start_pts = None
def moveTo(self, pts):
self._check_contour_is_closed()
self.start_pts = self.current_pts = pts
self._add_moveTo()
def lineTo(self, pts):
self._check_contour_is_open()
self._add_moveTo()
for pt, pen in zip(pts, self.pens):
pen.lineTo(*pt)
self.current_pts = pts
def qCurveTo(self, pointsList):
self._check_contour_is_open()
if len(pointsList[0]) == 1:
self.lineTo([(points[0],) for points in pointsList])
return
self._add_moveTo()
current_pts = []
for points, pen in zip(pointsList, self.pens):
pen.qCurveTo(*points)
current_pts.append((points[-1],))
self.current_pts = current_pts
def _curves_to_quadratic(self, pointsList):
curves = []
for current_pt, points in zip(self.current_pts, pointsList):
curves.append(current_pt + points)
quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves))
pointsList = []
for quadratic in quadratics:
pointsList.append(quadratic[1:])
self.qCurveTo(pointsList)
def curveTo(self, pointsList):
self._check_contour_is_open()
self._curves_to_quadratic(pointsList)
def closePath(self):
self._check_contour_is_open()
if self.start_pts is None:
for pen in self.pens:
pen.closePath()
self.current_pts = self.start_pts = None
def endPath(self):
self._check_contour_is_open()
if self.start_pts is None:
for pen in self.pens:
pen.endPath()
self.current_pts = self.start_pts = None
def addComponent(self, glyphName, transformations):
self._check_contour_is_closed()
for trans, pen in zip(transformations, self.pens):
pen.addComponent(glyphName, trans)
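A sketch of the argument convention described in the docstring: every drawing call takes one tuple of arguments per wrapped pen, so compatible masters stay in sync (coordinates below are illustrative only):

from fontTools.pens.cu2quPen import Cu2QuMultiPen
from fontTools.pens.recordingPen import RecordingPen

masters = [RecordingPen(), RecordingPen()]
pen = Cu2QuMultiPen(masters, max_err=1.0)
pen.moveTo([((0, 0),), ((10, 0),)])
pen.curveTo([
    ((0, 50), (50, 100), (100, 100)),
    ((10, 50), (60, 100), (110, 100)),
])
pen.closePath()
# both RecordingPens now hold interpolation-compatible quadratic segments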

View File

@ -4,14 +4,13 @@ from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object): class _PassThruComponentsMixin(object):
def addComponent(self, glyphName, transformation, **kwargs): def addComponent(self, glyphName, transformation, **kwargs):
self._outPen.addComponent(glyphName, transformation, **kwargs) self._outPen.addComponent(glyphName, transformation, **kwargs)
class FilterPen(_PassThruComponentsMixin, AbstractPen): class FilterPen(_PassThruComponentsMixin, AbstractPen):
""" Base class for pens that apply some transformation to the coordinates """Base class for pens that apply some transformation to the coordinates
they receive and pass them to another pen. they receive and pass them to another pen.
You can override any of its methods. The default implementation does You can override any of its methods. The default implementation does
@@ -57,24 +56,31 @@ class FilterPen(_PassThruComponentsMixin, AbstractPen):
def __init__(self, outPen): def __init__(self, outPen):
self._outPen = outPen self._outPen = outPen
self.current_pt = None
def moveTo(self, pt): def moveTo(self, pt):
self._outPen.moveTo(pt) self._outPen.moveTo(pt)
self.current_pt = pt
def lineTo(self, pt): def lineTo(self, pt):
self._outPen.lineTo(pt) self._outPen.lineTo(pt)
self.current_pt = pt
def curveTo(self, *points): def curveTo(self, *points):
self._outPen.curveTo(*points) self._outPen.curveTo(*points)
self.current_pt = points[-1]
def qCurveTo(self, *points): def qCurveTo(self, *points):
self._outPen.qCurveTo(*points) self._outPen.qCurveTo(*points)
self.current_pt = points[-1]
def closePath(self): def closePath(self):
self._outPen.closePath() self._outPen.closePath()
self.current_pt = None
def endPath(self): def endPath(self):
self._outPen.endPath() self._outPen.endPath()
self.current_pt = None
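A small hypothetical FilterPen subclass, to show where the pass-through methods above are meant to be overridden (the class name and the fixed offset are made up for illustration):

from fontTools.pens.filterPen import FilterPen
from fontTools.pens.recordingPen import RecordingPen

class TranslatePen(FilterPen):
    """Hypothetical filter that shifts every point by a fixed offset."""

    def __init__(self, outPen, dx, dy):
        super().__init__(outPen)
        self.dx, self.dy = dx, dy

    def _shift(self, pt):
        return (pt[0] + self.dx, pt[1] + self.dy)

    def moveTo(self, pt):
        super().moveTo(self._shift(pt))

    def lineTo(self, pt):
        super().lineTo(self._shift(pt))

    def curveTo(self, *points):
        super().curveTo(*(self._shift(p) for p in points))

    def qCurveTo(self, *points):
        super().qCurveTo(*(self._shift(p) if p is not None else None for p in points))

rec = RecordingPen()
pen = TranslatePen(rec, 10, 20)
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.closePath()
print(rec.value)  # [('moveTo', ((10, 20),)), ('lineTo', ((110, 20),)), ('closePath', ())]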
class ContourFilterPen(_PassThruComponentsMixin, RecordingPen): class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
@@ -121,7 +127,7 @@ class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen): class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
""" Baseclass for point pens that apply some transformation to the """Baseclass for point pens that apply some transformation to the
coordinates they receive and pass them to another point pen. coordinates they receive and pass them to another point pen.
You can override any of its methods. The default implementation does You can override any of its methods. The default implementation does


@@ -65,9 +65,7 @@ class HashPointPen(AbstractPointPen):
pt_type = segmentType[0] pt_type = segmentType[0]
self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}") self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")
def addComponent( def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
self, baseGlyphName, transformation, identifier=None, **kwargs
):
tr = "".join([f"{t:+}" for t in transformation]) tr = "".join([f"{t:+}" for t in transformation])
self.data.append("[") self.data.append("[")
try: try:

File diff suppressed because it is too large


@@ -2,7 +2,12 @@
"""Calculate the perimeter of a glyph.""" """Calculate the perimeter of a glyph."""
from fontTools.pens.basePen import BasePen from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC from fontTools.misc.bezierTools import (
approximateQuadraticArcLengthC,
calcQuadraticArcLengthC,
approximateCubicArcLengthC,
calcCubicArcLengthC,
)
import math import math
@@ -10,49 +15,55 @@ __all__ = ["PerimeterPen"]
def _distance(p0, p1): def _distance(p0, p1):
return math.hypot(p0[0] - p1[0], p0[1] - p1[1]) return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
class PerimeterPen(BasePen): class PerimeterPen(BasePen):
def __init__(self, glyphset=None, tolerance=0.005):
BasePen.__init__(self, glyphset)
self.value = 0
self.tolerance = tolerance
def __init__(self, glyphset=None, tolerance=0.005): # Choose which algorithm to use for quadratic and for cubic.
BasePen.__init__(self, glyphset) # Quadrature is faster but has fixed error characteristic with no strong
self.value = 0 # error bound. The cutoff points are derived empirically.
self.tolerance = tolerance self._addCubic = (
self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
)
self._addQuadratic = (
self._addQuadraticQuadrature
if tolerance >= 0.00075
else self._addQuadraticExact
)
# Choose which algorithm to use for quadratic and for cubic. def _moveTo(self, p0):
# Quadrature is faster but has fixed error characteristic with no strong self.__startPoint = p0
# error bound. The cutoff points are derived empirically.
self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive
self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact
def _moveTo(self, p0): def _closePath(self):
self.__startPoint = p0 p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _closePath(self): def _lineTo(self, p1):
p0 = self._getCurrentPoint() p0 = self._getCurrentPoint()
if p0 != self.__startPoint: self.value += _distance(p0, p1)
self._lineTo(self.__startPoint)
def _lineTo(self, p1): def _addQuadraticExact(self, c0, c1, c2):
p0 = self._getCurrentPoint() self.value += calcQuadraticArcLengthC(c0, c1, c2)
self.value += _distance(p0, p1)
def _addQuadraticExact(self, c0, c1, c2): def _addQuadraticQuadrature(self, c0, c1, c2):
self.value += calcQuadraticArcLengthC(c0, c1, c2) self.value += approximateQuadraticArcLengthC(c0, c1, c2)
def _addQuadraticQuadrature(self, c0, c1, c2): def _qCurveToOne(self, p1, p2):
self.value += approximateQuadraticArcLengthC(c0, c1, c2) p0 = self._getCurrentPoint()
self._addQuadratic(complex(*p0), complex(*p1), complex(*p2))
def _qCurveToOne(self, p1, p2): def _addCubicRecursive(self, c0, c1, c2, c3):
p0 = self._getCurrentPoint() self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance)
self._addQuadratic(complex(*p0), complex(*p1), complex(*p2))
def _addCubicRecursive(self, c0, c1, c2, c3): def _addCubicQuadrature(self, c0, c1, c2, c3):
self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance) self.value += approximateCubicArcLengthC(c0, c1, c2, c3)
def _addCubicQuadrature(self, c0, c1, c2, c3): def _curveToOne(self, p1, p2, p3):
self.value += approximateCubicArcLengthC(c0, c1, c2, c3) p0 = self._getCurrentPoint()
self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3))
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3))
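Typical use, with illustrative coordinates:

from fontTools.pens.perimeterPen import PerimeterPen

pen = PerimeterPen(tolerance=0.005)
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.closePath()
print(pen.value)  # 100 + 100 + hypot(100, 100), roughly 341.42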


@@ -11,180 +11,182 @@ __all__ = ["PointInsidePen"]
class PointInsidePen(BasePen): class PointInsidePen(BasePen):
"""This pen implements "point inside" testing: to test whether """This pen implements "point inside" testing: to test whether
a given point lies inside the shape (black) or outside (white). a given point lies inside the shape (black) or outside (white).
Instances of this class can be recycled, as long as the Instances of this class can be recycled, as long as the
setTestPoint() method is used to set the new point to test. setTestPoint() method is used to set the new point to test.
Typical usage: Typical usage:
pen = PointInsidePen(glyphSet, (100, 200)) pen = PointInsidePen(glyphSet, (100, 200))
outline.draw(pen) outline.draw(pen)
isInside = pen.getResult() isInside = pen.getResult()
Both the even-odd algorithm and the non-zero-winding-rule Both the even-odd algorithm and the non-zero-winding-rule
algorithm are implemented. The latter is the default, specify algorithm are implemented. The latter is the default, specify
True for the evenOdd argument of __init__ or setTestPoint True for the evenOdd argument of __init__ or setTestPoint
to use the even-odd algorithm. to use the even-odd algorithm.
""" """
# This class implements the classical "shoot a ray from the test point # This class implements the classical "shoot a ray from the test point
# to infinity and count how many times it intersects the outline" (as well # to infinity and count how many times it intersects the outline" (as well
# as the non-zero variant, where the counter is incremented if the outline # as the non-zero variant, where the counter is incremented if the outline
# intersects the ray in one direction and decremented if it intersects in # intersects the ray in one direction and decremented if it intersects in
# the other direction). # the other direction).
# I found an amazingly clear explanation of the subtleties involved in # I found an amazingly clear explanation of the subtleties involved in
# implementing this correctly for polygons here: # implementing this correctly for polygons here:
# http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
# I extended the principles outlined on that page to curves. # I extended the principles outlined on that page to curves.
def __init__(self, glyphSet, testPoint, evenOdd=False): def __init__(self, glyphSet, testPoint, evenOdd=False):
BasePen.__init__(self, glyphSet) BasePen.__init__(self, glyphSet)
self.setTestPoint(testPoint, evenOdd) self.setTestPoint(testPoint, evenOdd)
def setTestPoint(self, testPoint, evenOdd=False): def setTestPoint(self, testPoint, evenOdd=False):
"""Set the point to test. Call this _before_ the outline gets drawn.""" """Set the point to test. Call this _before_ the outline gets drawn."""
self.testPoint = testPoint self.testPoint = testPoint
self.evenOdd = evenOdd self.evenOdd = evenOdd
self.firstPoint = None self.firstPoint = None
self.intersectionCount = 0 self.intersectionCount = 0
def getWinding(self): def getWinding(self):
if self.firstPoint is not None: if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works # always make sure the sub paths are closed; the algorithm only works
# for closed paths. # for closed paths.
self.closePath() self.closePath()
return self.intersectionCount return self.intersectionCount
def getResult(self): def getResult(self):
"""After the shape has been drawn, getResult() returns True if the test """After the shape has been drawn, getResult() returns True if the test
point lies within the (black) shape, and False if it doesn't. point lies within the (black) shape, and False if it doesn't.
""" """
winding = self.getWinding() winding = self.getWinding()
if self.evenOdd: if self.evenOdd:
result = winding % 2 result = winding % 2
else: # non-zero else: # non-zero
result = self.intersectionCount != 0 result = self.intersectionCount != 0
return not not result return not not result
def _addIntersection(self, goingUp): def _addIntersection(self, goingUp):
if self.evenOdd or goingUp: if self.evenOdd or goingUp:
self.intersectionCount += 1 self.intersectionCount += 1
else: else:
self.intersectionCount -= 1 self.intersectionCount -= 1
def _moveTo(self, point): def _moveTo(self, point):
if self.firstPoint is not None: if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works # always make sure the sub paths are closed; the algorithm only works
# for closed paths. # for closed paths.
self.closePath() self.closePath()
self.firstPoint = point self.firstPoint = point
def _lineTo(self, point): def _lineTo(self, point):
x, y = self.testPoint x, y = self.testPoint
x1, y1 = self._getCurrentPoint() x1, y1 = self._getCurrentPoint()
x2, y2 = point x2, y2 = point
if x1 < x and x2 < x: if x1 < x and x2 < x:
return return
if y1 < y and y2 < y: if y1 < y and y2 < y:
return return
if y1 >= y and y2 >= y: if y1 >= y and y2 >= y:
return return
dx = x2 - x1 dx = x2 - x1
dy = y2 - y1 dy = y2 - y1
t = (y - y1) / dy t = (y - y1) / dy
ix = dx * t + x1 ix = dx * t + x1
if ix < x: if ix < x:
return return
self._addIntersection(y2 > y1) self._addIntersection(y2 > y1)
def _curveToOne(self, bcp1, bcp2, point): def _curveToOne(self, bcp1, bcp2, point):
x, y = self.testPoint x, y = self.testPoint
x1, y1 = self._getCurrentPoint() x1, y1 = self._getCurrentPoint()
x2, y2 = bcp1 x2, y2 = bcp1
x3, y3 = bcp2 x3, y3 = bcp2
x4, y4 = point x4, y4 = point
if x1 < x and x2 < x and x3 < x and x4 < x: if x1 < x and x2 < x and x3 < x and x4 < x:
return return
if y1 < y and y2 < y and y3 < y and y4 < y: if y1 < y and y2 < y and y3 < y and y4 < y:
return return
if y1 >= y and y2 >= y and y3 >= y and y4 >= y: if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
return return
dy = y1 dy = y1
cy = (y2 - dy) * 3.0 cy = (y2 - dy) * 3.0
by = (y3 - y2) * 3.0 - cy by = (y3 - y2) * 3.0 - cy
ay = y4 - dy - cy - by ay = y4 - dy - cy - by
solutions = sorted(solveCubic(ay, by, cy, dy - y)) solutions = sorted(solveCubic(ay, by, cy, dy - y))
solutions = [t for t in solutions if -0. <= t <= 1.] solutions = [t for t in solutions if -0.0 <= t <= 1.0]
if not solutions: if not solutions:
return return
dx = x1 dx = x1
cx = (x2 - dx) * 3.0 cx = (x2 - dx) * 3.0
bx = (x3 - x2) * 3.0 - cx bx = (x3 - x2) * 3.0 - cx
ax = x4 - dx - cx - bx ax = x4 - dx - cx - bx
above = y1 >= y above = y1 >= y
lastT = None lastT = None
for t in solutions: for t in solutions:
if t == lastT: if t == lastT:
continue continue
lastT = t lastT = t
t2 = t * t t2 = t * t
t3 = t2 * t t3 = t2 * t
direction = 3*ay*t2 + 2*by*t + cy direction = 3 * ay * t2 + 2 * by * t + cy
incomingGoingUp = outgoingGoingUp = direction > 0.0 incomingGoingUp = outgoingGoingUp = direction > 0.0
if direction == 0.0: if direction == 0.0:
direction = 6*ay*t + 2*by direction = 6 * ay * t + 2 * by
outgoingGoingUp = direction > 0.0 outgoingGoingUp = direction > 0.0
incomingGoingUp = not outgoingGoingUp incomingGoingUp = not outgoingGoingUp
if direction == 0.0: if direction == 0.0:
direction = ay direction = ay
incomingGoingUp = outgoingGoingUp = direction > 0.0 incomingGoingUp = outgoingGoingUp = direction > 0.0
xt = ax*t3 + bx*t2 + cx*t + dx xt = ax * t3 + bx * t2 + cx * t + dx
if xt < x: if xt < x:
continue continue
if t in (0.0, -0.0): if t in (0.0, -0.0):
if not outgoingGoingUp: if not outgoingGoingUp:
self._addIntersection(outgoingGoingUp) self._addIntersection(outgoingGoingUp)
elif t == 1.0: elif t == 1.0:
if incomingGoingUp: if incomingGoingUp:
self._addIntersection(incomingGoingUp) self._addIntersection(incomingGoingUp)
else: else:
if incomingGoingUp == outgoingGoingUp: if incomingGoingUp == outgoingGoingUp:
self._addIntersection(outgoingGoingUp) self._addIntersection(outgoingGoingUp)
#else: # else:
# we're not really intersecting, merely touching # we're not really intersecting, merely touching
def _qCurveToOne_unfinished(self, bcp, point): def _qCurveToOne_unfinished(self, bcp, point):
# XXX need to finish this, for now doing it through a cubic # XXX need to finish this, for now doing it through a cubic
# (BasePen implements _qCurveTo in terms of a cubic) will # (BasePen implements _qCurveTo in terms of a cubic) will
# have to do. # have to do.
x, y = self.testPoint x, y = self.testPoint
x1, y1 = self._getCurrentPoint() x1, y1 = self._getCurrentPoint()
x2, y2 = bcp x2, y2 = bcp
x3, y3 = point x3, y3 = point
c = y1 c = y1
b = (y2 - c) * 2.0 b = (y2 - c) * 2.0
a = y3 - c - b a = y3 - c - b
solutions = sorted(solveQuadratic(a, b, c - y)) solutions = sorted(solveQuadratic(a, b, c - y))
solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] solutions = [
if not solutions: t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
return ]
# XXX if not solutions:
return
# XXX
def _closePath(self): def _closePath(self):
if self._getCurrentPoint() != self.firstPoint: if self._getCurrentPoint() != self.firstPoint:
self.lineTo(self.firstPoint) self.lineTo(self.firstPoint)
self.firstPoint = None self.firstPoint = None
def _endPath(self): def _endPath(self):
"""Insideness is not defined for open contours.""" """Insideness is not defined for open contours."""
raise NotImplementedError raise NotImplementedError
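For context, the intersection counting implemented above is what drives PointInsidePen; a minimal usage sketch (the font path and glyph name are hypothetical):

from fontTools.ttLib import TTFont
from fontTools.pens.pointInsidePen import PointInsidePen

font = TTFont("MyFont.ttf")   # hypothetical font file
glyphSet = font.getGlyphSet()
pen = PointInsidePen(glyphSet, (100, 200), evenOdd=False)
glyphSet["A"].draw(pen)       # hypothetical glyph name
print(pen.getResult())        # True if (100, 200) lies inside the outline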

View File

@ -13,481 +13,513 @@ For instance, whether or not a point is smooth, and its name.
""" """
import math import math
from typing import Any, Optional, Tuple from typing import Any, Optional, Tuple, Dict
from fontTools.pens.basePen import AbstractPen, PenError from fontTools.pens.basePen import AbstractPen, PenError
from fontTools.misc.transform import DecomposedTransform
__all__ = [ __all__ = [
"AbstractPointPen", "AbstractPointPen",
"BasePointToSegmentPen", "BasePointToSegmentPen",
"PointToSegmentPen", "PointToSegmentPen",
"SegmentToPointPen", "SegmentToPointPen",
"GuessSmoothPointPen", "GuessSmoothPointPen",
"ReverseContourPointPen", "ReverseContourPointPen",
] ]
class AbstractPointPen: class AbstractPointPen:
"""Baseclass for all PointPens.""" """Baseclass for all PointPens."""
def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None: def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
"""Start a new sub path.""" """Start a new sub path."""
raise NotImplementedError raise NotImplementedError
def endPath(self) -> None: def endPath(self) -> None:
"""End the current sub path.""" """End the current sub path."""
raise NotImplementedError raise NotImplementedError
def addPoint( def addPoint(
self, self,
pt: Tuple[float, float], pt: Tuple[float, float],
segmentType: Optional[str] = None, segmentType: Optional[str] = None,
smooth: bool = False, smooth: bool = False,
name: Optional[str] = None, name: Optional[str] = None,
identifier: Optional[str] = None, identifier: Optional[str] = None,
**kwargs: Any **kwargs: Any,
) -> None: ) -> None:
"""Add a point to the current sub path.""" """Add a point to the current sub path."""
raise NotImplementedError raise NotImplementedError
def addComponent( def addComponent(
self, self,
baseGlyphName: str, baseGlyphName: str,
transformation: Tuple[float, float, float, float, float, float], transformation: Tuple[float, float, float, float, float, float],
identifier: Optional[str] = None, identifier: Optional[str] = None,
**kwargs: Any **kwargs: Any,
) -> None: ) -> None:
"""Add a sub glyph.""" """Add a sub glyph."""
raise NotImplementedError raise NotImplementedError
def addVarComponent(
self,
glyphName: str,
transformation: DecomposedTransform,
location: Dict[str, float],
identifier: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Add a VarComponent sub glyph. The 'transformation' argument
must be a DecomposedTransform from the fontTools.misc.transform module,
and the 'location' argument must be a dictionary mapping axis tags
to their locations.
"""
# ttGlyphSet decomposes for us
raise AttributeError
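As a hedged illustration of the new method's call shape (the base glyph name, axis values and DecomposedTransform field names below are assumptions for the sketch; RecordingPointPen gains a matching addVarComponent later in this change):

from fontTools.misc.transform import DecomposedTransform
from fontTools.pens.recordingPen import RecordingPointPen

pen = RecordingPointPen()  # any PointPen that implements addVarComponent
pen.addVarComponent(
    "varcomp.base",                                    # hypothetical base glyph
    DecomposedTransform(translateX=100, rotation=30),  # assumed field names
    {"wght": 650.0, "wdth": 85.0},                     # axis tag -> location
)
print(pen.value)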
class BasePointToSegmentPen(AbstractPointPen): class BasePointToSegmentPen(AbstractPointPen):
""" """
Base class for retrieving the outline in a segment-oriented Base class for retrieving the outline in a segment-oriented
way. The PointPen protocol is simple yet also a little tricky, way. The PointPen protocol is simple yet also a little tricky,
so when you need an outline presented as segments but you have so when you need an outline presented as segments but you have
as points, do use this base implementation as it properly takes as points, do use this base implementation as it properly takes
care of all the edge cases. care of all the edge cases.
""" """
def __init__(self): def __init__(self):
self.currentPath = None self.currentPath = None
def beginPath(self, identifier=None, **kwargs): def beginPath(self, identifier=None, **kwargs):
if self.currentPath is not None: if self.currentPath is not None:
raise PenError("Path already begun.") raise PenError("Path already begun.")
self.currentPath = [] self.currentPath = []
def _flushContour(self, segments): def _flushContour(self, segments):
"""Override this method. """Override this method.
It will be called for each non-empty sub path with a list It will be called for each non-empty sub path with a list
of segments: the 'segments' argument. of segments: the 'segments' argument.
The segments list contains tuples of length 2: The segments list contains tuples of length 2:
(segmentType, points) (segmentType, points)
segmentType is one of "move", "line", "curve" or "qcurve". segmentType is one of "move", "line", "curve" or "qcurve".
"move" may only occur as the first segment, and it signifies "move" may only occur as the first segment, and it signifies
an OPEN path. A CLOSED path does NOT start with a "move", in an OPEN path. A CLOSED path does NOT start with a "move", in
fact it will not contain a "move" at ALL. fact it will not contain a "move" at ALL.
The 'points' field in the 2-tuple is a list of point info The 'points' field in the 2-tuple is a list of point info
tuples. The list has 1 or more items, a point tuple has tuples. The list has 1 or more items, a point tuple has
four items: four items:
(point, smooth, name, kwargs) (point, smooth, name, kwargs)
'point' is an (x, y) coordinate pair. 'point' is an (x, y) coordinate pair.
For a closed path, the initial moveTo point is defined as For a closed path, the initial moveTo point is defined as
the last point of the last segment. the last point of the last segment.
The 'points' list of "move" and "line" segments always contains The 'points' list of "move" and "line" segments always contains
exactly one point tuple. exactly one point tuple.
""" """
raise NotImplementedError raise NotImplementedError
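For a concrete picture of the format described above: a closed triangle whose three points were all added with segmentType "line" reaches _flushContour roughly as follows (a sketch, with default smooth/name/kwargs shown):

segments = [
    ("line", [((200, 100), False, None, {})]),
    ("line", [((150, 200), False, None, {})]),
    ("line", [((100, 100), False, None, {})]),
]
# Closed contour: there is no "move" segment, and the implied start point is
# the last point of the last segment, i.e. (100, 100), the first point added.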
def endPath(self): def endPath(self):
if self.currentPath is None: if self.currentPath is None:
raise PenError("Path not begun.") raise PenError("Path not begun.")
points = self.currentPath points = self.currentPath
self.currentPath = None self.currentPath = None
if not points: if not points:
return return
if len(points) == 1: if len(points) == 1:
# Not much more we can do than output a single move segment. # Not much more we can do than output a single move segment.
pt, segmentType, smooth, name, kwargs = points[0] pt, segmentType, smooth, name, kwargs = points[0]
segments = [("move", [(pt, smooth, name, kwargs)])] segments = [("move", [(pt, smooth, name, kwargs)])]
self._flushContour(segments) self._flushContour(segments)
return return
segments = [] segments = []
if points[0][1] == "move": if points[0][1] == "move":
# It's an open contour, insert a "move" segment for the first # It's an open contour, insert a "move" segment for the first
# point and remove that first point from the point list. # point and remove that first point from the point list.
pt, segmentType, smooth, name, kwargs = points[0] pt, segmentType, smooth, name, kwargs = points[0]
segments.append(("move", [(pt, smooth, name, kwargs)])) segments.append(("move", [(pt, smooth, name, kwargs)]))
points.pop(0) points.pop(0)
else: else:
# It's a closed contour. Locate the first on-curve point, and # It's a closed contour. Locate the first on-curve point, and
# rotate the point list so that it _ends_ with an on-curve # rotate the point list so that it _ends_ with an on-curve
# point. # point.
firstOnCurve = None firstOnCurve = None
for i in range(len(points)): for i in range(len(points)):
segmentType = points[i][1] segmentType = points[i][1]
if segmentType is not None: if segmentType is not None:
firstOnCurve = i firstOnCurve = i
break break
if firstOnCurve is None: if firstOnCurve is None:
# Special case for quadratics: a contour with no on-curve # Special case for quadratics: a contour with no on-curve
# points. Add a "None" point. (See also the Pen protocol's # points. Add a "None" point. (See also the Pen protocol's
# qCurveTo() method and fontTools.pens.basePen.py.) # qCurveTo() method and fontTools.pens.basePen.py.)
points.append((None, "qcurve", None, None, None)) points.append((None, "qcurve", None, None, None))
else: else:
points = points[firstOnCurve+1:] + points[:firstOnCurve+1] points = points[firstOnCurve + 1 :] + points[: firstOnCurve + 1]
currentSegment = [] currentSegment = []
for pt, segmentType, smooth, name, kwargs in points: for pt, segmentType, smooth, name, kwargs in points:
currentSegment.append((pt, smooth, name, kwargs)) currentSegment.append((pt, smooth, name, kwargs))
if segmentType is None: if segmentType is None:
continue continue
segments.append((segmentType, currentSegment)) segments.append((segmentType, currentSegment))
currentSegment = [] currentSegment = []
self._flushContour(segments) self._flushContour(segments)
def addPoint(self, pt, segmentType=None, smooth=False, name=None, def addPoint(
identifier=None, **kwargs): self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
if self.currentPath is None: ):
raise PenError("Path not begun") if self.currentPath is None:
self.currentPath.append((pt, segmentType, smooth, name, kwargs)) raise PenError("Path not begun")
self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PointToSegmentPen(BasePointToSegmentPen): class PointToSegmentPen(BasePointToSegmentPen):
""" """
Adapter class that converts the PointPen protocol to the Adapter class that converts the PointPen protocol to the
(Segment)Pen protocol. (Segment)Pen protocol.
NOTE: The segment pen does not support and will drop point names, identifiers NOTE: The segment pen does not support and will drop point names, identifiers
and kwargs. and kwargs.
""" """
def __init__(self, segmentPen, outputImpliedClosingLine=False): def __init__(self, segmentPen, outputImpliedClosingLine=False):
BasePointToSegmentPen.__init__(self) BasePointToSegmentPen.__init__(self)
self.pen = segmentPen self.pen = segmentPen
self.outputImpliedClosingLine = outputImpliedClosingLine self.outputImpliedClosingLine = outputImpliedClosingLine
def _flushContour(self, segments): def _flushContour(self, segments):
if not segments: if not segments:
raise PenError("Must have at least one segment.") raise PenError("Must have at least one segment.")
pen = self.pen pen = self.pen
if segments[0][0] == "move": if segments[0][0] == "move":
# It's an open path. # It's an open path.
closed = False closed = False
points = segments[0][1] points = segments[0][1]
if len(points) != 1: if len(points) != 1:
raise PenError(f"Illegal move segment point count: {len(points)}") raise PenError(f"Illegal move segment point count: {len(points)}")
movePt, _, _ , _ = points[0] movePt, _, _, _ = points[0]
del segments[0] del segments[0]
else: else:
# It's a closed path, do a moveTo to the last # It's a closed path, do a moveTo to the last
# point of the last segment. # point of the last segment.
closed = True closed = True
segmentType, points = segments[-1] segmentType, points = segments[-1]
movePt, _, _ , _ = points[-1] movePt, _, _, _ = points[-1]
if movePt is None: if movePt is None:
# quad special case: a contour with no on-curve points contains # quad special case: a contour with no on-curve points contains
# one "qcurve" segment that ends with a point that's None. We # one "qcurve" segment that ends with a point that's None. We
# must not output a moveTo() in that case. # must not output a moveTo() in that case.
pass pass
else: else:
pen.moveTo(movePt) pen.moveTo(movePt)
outputImpliedClosingLine = self.outputImpliedClosingLine outputImpliedClosingLine = self.outputImpliedClosingLine
nSegments = len(segments) nSegments = len(segments)
lastPt = movePt lastPt = movePt
for i in range(nSegments): for i in range(nSegments):
segmentType, points = segments[i] segmentType, points = segments[i]
points = [pt for pt, _, _ , _ in points] points = [pt for pt, _, _, _ in points]
if segmentType == "line": if segmentType == "line":
if len(points) != 1: if len(points) != 1:
raise PenError(f"Illegal line segment point count: {len(points)}") raise PenError(f"Illegal line segment point count: {len(points)}")
pt = points[0] pt = points[0]
# For closed contours, a 'lineTo' is always implied from the last oncurve # For closed contours, a 'lineTo' is always implied from the last oncurve
# point to the starting point, thus we can omit it when the last and # point to the starting point, thus we can omit it when the last and
# starting point don't overlap. # starting point don't overlap.
# However, when the last oncurve point is a "line" segment and has same # However, when the last oncurve point is a "line" segment and has same
# coordinates as the starting point of a closed contour, we need to output # coordinates as the starting point of a closed contour, we need to output
# the closing 'lineTo' explicitly (regardless of the value of the # the closing 'lineTo' explicitly (regardless of the value of the
# 'outputImpliedClosingLine' option) in order to disambiguate this case from # 'outputImpliedClosingLine' option) in order to disambiguate this case from
# the implied closing 'lineTo', otherwise the duplicate point would be lost. # the implied closing 'lineTo', otherwise the duplicate point would be lost.
# See https://github.com/googlefonts/fontmake/issues/572. # See https://github.com/googlefonts/fontmake/issues/572.
if ( if (
i + 1 != nSegments i + 1 != nSegments
or outputImpliedClosingLine or outputImpliedClosingLine
or not closed or not closed
or pt == lastPt or pt == lastPt
): ):
pen.lineTo(pt) pen.lineTo(pt)
lastPt = pt lastPt = pt
elif segmentType == "curve": elif segmentType == "curve":
pen.curveTo(*points) pen.curveTo(*points)
lastPt = points[-1] lastPt = points[-1]
elif segmentType == "qcurve": elif segmentType == "qcurve":
pen.qCurveTo(*points) pen.qCurveTo(*points)
lastPt = points[-1] lastPt = points[-1]
else: else:
raise PenError(f"Illegal segmentType: {segmentType}") raise PenError(f"Illegal segmentType: {segmentType}")
if closed: if closed:
pen.closePath() pen.closePath()
else: else:
pen.endPath() pen.endPath()
def addComponent(self, glyphName, transform, identifier=None, **kwargs): def addComponent(self, glyphName, transform, identifier=None, **kwargs):
del identifier # unused del identifier # unused
del kwargs # unused del kwargs # unused
self.pen.addComponent(glyphName, transform) self.pen.addComponent(glyphName, transform)
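A minimal sketch of the adapter in use, feeding point data into a RecordingPen (coordinates are arbitrary):

from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.pens.recordingPen import RecordingPen

rpen = RecordingPen()
ppen = PointToSegmentPen(rpen)
ppen.beginPath()
ppen.addPoint((0, 0), segmentType="line")
ppen.addPoint((0, 100), segmentType="line")
ppen.addPoint((100, 100), segmentType="line")
ppen.endPath()
print(rpen.value)
# roughly: [('moveTo', ((0, 0),)), ('lineTo', ((0, 100),)),
#           ('lineTo', ((100, 100),)), ('closePath', ())]
# the closing line back to (0, 0) is implied by closePath and therefore omitted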
class SegmentToPointPen(AbstractPen): class SegmentToPointPen(AbstractPen):
""" """
Adapter class that converts the (Segment)Pen protocol to the Adapter class that converts the (Segment)Pen protocol to the
PointPen protocol. PointPen protocol.
""" """
def __init__(self, pointPen, guessSmooth=True): def __init__(self, pointPen, guessSmooth=True):
if guessSmooth: if guessSmooth:
self.pen = GuessSmoothPointPen(pointPen) self.pen = GuessSmoothPointPen(pointPen)
else: else:
self.pen = pointPen self.pen = pointPen
self.contour = None self.contour = None
def _flushContour(self): def _flushContour(self):
pen = self.pen pen = self.pen
pen.beginPath() pen.beginPath()
for pt, segmentType in self.contour: for pt, segmentType in self.contour:
pen.addPoint(pt, segmentType=segmentType) pen.addPoint(pt, segmentType=segmentType)
pen.endPath() pen.endPath()
def moveTo(self, pt): def moveTo(self, pt):
self.contour = [] self.contour = []
self.contour.append((pt, "move")) self.contour.append((pt, "move"))
def lineTo(self, pt): def lineTo(self, pt):
if self.contour is None: if self.contour is None:
raise PenError("Contour missing required initial moveTo") raise PenError("Contour missing required initial moveTo")
self.contour.append((pt, "line")) self.contour.append((pt, "line"))
def curveTo(self, *pts): def curveTo(self, *pts):
if not pts: if not pts:
raise TypeError("Must pass in at least one point") raise TypeError("Must pass in at least one point")
if self.contour is None: if self.contour is None:
raise PenError("Contour missing required initial moveTo") raise PenError("Contour missing required initial moveTo")
for pt in pts[:-1]: for pt in pts[:-1]:
self.contour.append((pt, None)) self.contour.append((pt, None))
self.contour.append((pts[-1], "curve")) self.contour.append((pts[-1], "curve"))
def qCurveTo(self, *pts): def qCurveTo(self, *pts):
if not pts: if not pts:
raise TypeError("Must pass in at least one point") raise TypeError("Must pass in at least one point")
if pts[-1] is None: if pts[-1] is None:
self.contour = [] self.contour = []
else: else:
if self.contour is None: if self.contour is None:
raise PenError("Contour missing required initial moveTo") raise PenError("Contour missing required initial moveTo")
for pt in pts[:-1]: for pt in pts[:-1]:
self.contour.append((pt, None)) self.contour.append((pt, None))
if pts[-1] is not None: if pts[-1] is not None:
self.contour.append((pts[-1], "qcurve")) self.contour.append((pts[-1], "qcurve"))
def closePath(self): def closePath(self):
if self.contour is None: if self.contour is None:
raise PenError("Contour missing required initial moveTo") raise PenError("Contour missing required initial moveTo")
if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]: if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
self.contour[0] = self.contour[-1] self.contour[0] = self.contour[-1]
del self.contour[-1] del self.contour[-1]
else: else:
# There's an implied line at the end, replace "move" with "line" # There's an implied line at the end, replace "move" with "line"
# for the first point # for the first point
pt, tp = self.contour[0] pt, tp = self.contour[0]
if tp == "move": if tp == "move":
self.contour[0] = pt, "line" self.contour[0] = pt, "line"
self._flushContour() self._flushContour()
self.contour = None self.contour = None
def endPath(self): def endPath(self):
if self.contour is None: if self.contour is None:
raise PenError("Contour missing required initial moveTo") raise PenError("Contour missing required initial moveTo")
self._flushContour() self._flushContour()
self.contour = None self.contour = None
def addComponent(self, glyphName, transform): def addComponent(self, glyphName, transform):
if self.contour is not None: if self.contour is not None:
raise PenError("Components must be added before or after contours") raise PenError("Components must be added before or after contours")
self.pen.addComponent(glyphName, transform) self.pen.addComponent(glyphName, transform)
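And the reverse direction, converting segment drawing calls into PointPen calls (again recording purely so the output can be inspected):

from fontTools.pens.pointPen import SegmentToPointPen
from fontTools.pens.recordingPen import RecordingPointPen

out = RecordingPointPen()
pen = SegmentToPointPen(out)   # wraps 'out' in GuessSmoothPointPen by default
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.lineTo((100, 100))
pen.closePath()
print(out.value)  # beginPath, three addPoint(..., "line") calls, endPath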
class GuessSmoothPointPen(AbstractPointPen): class GuessSmoothPointPen(AbstractPointPen):
""" """
Filtering PointPen that tries to determine whether an on-curve point Filtering PointPen that tries to determine whether an on-curve point
should be "smooth", ie. that it's a "tangent" point or a "curve" point. should be "smooth", ie. that it's a "tangent" point or a "curve" point.
""" """
def __init__(self, outPen, error=0.05): def __init__(self, outPen, error=0.05):
self._outPen = outPen self._outPen = outPen
self._error = error self._error = error
self._points = None self._points = None
def _flushContour(self): def _flushContour(self):
if self._points is None: if self._points is None:
raise PenError("Path not begun") raise PenError("Path not begun")
points = self._points points = self._points
nPoints = len(points) nPoints = len(points)
if not nPoints: if not nPoints:
return return
if points[0][1] == "move": if points[0][1] == "move":
# Open path. # Open path.
indices = range(1, nPoints - 1) indices = range(1, nPoints - 1)
elif nPoints > 1: elif nPoints > 1:
# Closed path. To avoid having to mod the contour index, we # Closed path. To avoid having to mod the contour index, we
# simply abuse Python's negative index feature, and start at -1 # simply abuse Python's negative index feature, and start at -1
indices = range(-1, nPoints - 1) indices = range(-1, nPoints - 1)
else: else:
# closed path containing 1 point (!), ignore. # closed path containing 1 point (!), ignore.
indices = [] indices = []
for i in indices: for i in indices:
pt, segmentType, _, name, kwargs = points[i] pt, segmentType, _, name, kwargs = points[i]
if segmentType is None: if segmentType is None:
continue continue
prev = i - 1 prev = i - 1
next = i + 1 next = i + 1
if points[prev][1] is not None and points[next][1] is not None: if points[prev][1] is not None and points[next][1] is not None:
continue continue
# At least one of our neighbors is an off-curve point # At least one of our neighbors is an off-curve point
pt = points[i][0] pt = points[i][0]
prevPt = points[prev][0] prevPt = points[prev][0]
nextPt = points[next][0] nextPt = points[next][0]
if pt != prevPt and pt != nextPt: if pt != prevPt and pt != nextPt:
dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1] dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1] dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
a1 = math.atan2(dy1, dx1) a1 = math.atan2(dy1, dx1)
a2 = math.atan2(dy2, dx2) a2 = math.atan2(dy2, dx2)
if abs(a1 - a2) < self._error: if abs(a1 - a2) < self._error:
points[i] = pt, segmentType, True, name, kwargs points[i] = pt, segmentType, True, name, kwargs
for pt, segmentType, smooth, name, kwargs in points: for pt, segmentType, smooth, name, kwargs in points:
self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs) self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
def beginPath(self, identifier=None, **kwargs): def beginPath(self, identifier=None, **kwargs):
if self._points is not None: if self._points is not None:
raise PenError("Path already begun") raise PenError("Path already begun")
self._points = [] self._points = []
if identifier is not None: if identifier is not None:
kwargs["identifier"] = identifier kwargs["identifier"] = identifier
self._outPen.beginPath(**kwargs) self._outPen.beginPath(**kwargs)
def endPath(self): def endPath(self):
self._flushContour() self._flushContour()
self._outPen.endPath() self._outPen.endPath()
self._points = None self._points = None
def addPoint(self, pt, segmentType=None, smooth=False, name=None, def addPoint(
identifier=None, **kwargs): self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
if self._points is None: ):
raise PenError("Path not begun") if self._points is None:
if identifier is not None: raise PenError("Path not begun")
kwargs["identifier"] = identifier if identifier is not None:
self._points.append((pt, segmentType, False, name, kwargs)) kwargs["identifier"] = identifier
self._points.append((pt, segmentType, False, name, kwargs))
def addComponent(self, glyphName, transformation, identifier=None, **kwargs): def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
if self._points is not None: if self._points is not None:
raise PenError("Components must be added before or after contours") raise PenError("Components must be added before or after contours")
if identifier is not None: if identifier is not None:
kwargs["identifier"] = identifier kwargs["identifier"] = identifier
self._outPen.addComponent(glyphName, transformation, **kwargs) self._outPen.addComponent(glyphName, transformation, **kwargs)
def addVarComponent(
self, glyphName, transformation, location, identifier=None, **kwargs
):
if self._points is not None:
raise PenError("VarComponents must be added before or after contours")
if identifier is not None:
kwargs["identifier"] = identifier
self._outPen.addVarComponent(glyphName, transformation, location, **kwargs)
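A small, schematic sketch of the smoothness guessing: the middle on-curve point below is collinear with its off-curve neighbours, so it is reported as smooth.

from fontTools.pens.pointPen import GuessSmoothPointPen
from fontTools.pens.recordingPen import RecordingPointPen

out = RecordingPointPen()
pen = GuessSmoothPointPen(out)
pen.beginPath()
pen.addPoint((0, 0), segmentType="curve")
pen.addPoint((50, 50))             # off-curve
pen.addPoint((100, 100), segmentType="curve")
pen.addPoint((150, 150))           # off-curve
pen.endPath()
# (100, 100) is forwarded with smooth=True; (0, 0) sits at a direction
# reversal of the closed contour, so it stays non-smooth.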
class ReverseContourPointPen(AbstractPointPen): class ReverseContourPointPen(AbstractPointPen):
""" """
This is a PointPen that passes outline data to another PointPen, but This is a PointPen that passes outline data to another PointPen, but
reversing the winding direction of all contours. Components are simply reversing the winding direction of all contours. Components are simply
passed through unchanged. passed through unchanged.
Closed contours are reversed in such a way that the first point remains Closed contours are reversed in such a way that the first point remains
the first point. the first point.
""" """
def __init__(self, outputPointPen): def __init__(self, outputPointPen):
self.pen = outputPointPen self.pen = outputPointPen
# a place to store the points for the current sub path # a place to store the points for the current sub path
self.currentContour = None self.currentContour = None
def _flushContour(self): def _flushContour(self):
pen = self.pen pen = self.pen
contour = self.currentContour contour = self.currentContour
if not contour: if not contour:
pen.beginPath(identifier=self.currentContourIdentifier) pen.beginPath(identifier=self.currentContourIdentifier)
pen.endPath() pen.endPath()
return return
closed = contour[0][1] != "move" closed = contour[0][1] != "move"
if not closed: if not closed:
lastSegmentType = "move" lastSegmentType = "move"
else: else:
# Remove the first point and insert it at the end. When # Remove the first point and insert it at the end. When
# the list of points gets reversed, this point will then # the list of points gets reversed, this point will then
# again be at the start. In other words, the following # again be at the start. In other words, the following
# will hold: # will hold:
# for N in range(len(originalContour)): # for N in range(len(originalContour)):
# originalContour[N] == reversedContour[-N] # originalContour[N] == reversedContour[-N]
contour.append(contour.pop(0)) contour.append(contour.pop(0))
# Find the first on-curve point. # Find the first on-curve point.
firstOnCurve = None firstOnCurve = None
for i in range(len(contour)): for i in range(len(contour)):
if contour[i][1] is not None: if contour[i][1] is not None:
firstOnCurve = i firstOnCurve = i
break break
if firstOnCurve is None: if firstOnCurve is None:
# There are no on-curve points, we basically have to # There are no on-curve points, we basically have to
# do nothing but contour.reverse(). # do nothing but contour.reverse().
lastSegmentType = None lastSegmentType = None
else: else:
lastSegmentType = contour[firstOnCurve][1] lastSegmentType = contour[firstOnCurve][1]
contour.reverse() contour.reverse()
if not closed: if not closed:
# Open paths must start with a move, so we simply dump # Open paths must start with a move, so we simply dump
# all off-curve points leading up to the first on-curve. # all off-curve points leading up to the first on-curve.
while contour[0][1] is None: while contour[0][1] is None:
contour.pop(0) contour.pop(0)
pen.beginPath(identifier=self.currentContourIdentifier) pen.beginPath(identifier=self.currentContourIdentifier)
for pt, nextSegmentType, smooth, name, kwargs in contour: for pt, nextSegmentType, smooth, name, kwargs in contour:
if nextSegmentType is not None: if nextSegmentType is not None:
segmentType = lastSegmentType segmentType = lastSegmentType
lastSegmentType = nextSegmentType lastSegmentType = nextSegmentType
else: else:
segmentType = None segmentType = None
pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs) pen.addPoint(
pen.endPath() pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
)
pen.endPath()
def beginPath(self, identifier=None, **kwargs): def beginPath(self, identifier=None, **kwargs):
if self.currentContour is not None: if self.currentContour is not None:
raise PenError("Path already begun") raise PenError("Path already begun")
self.currentContour = [] self.currentContour = []
self.currentContourIdentifier = identifier self.currentContourIdentifier = identifier
self.onCurve = [] self.onCurve = []
def endPath(self): def endPath(self):
if self.currentContour is None: if self.currentContour is None:
raise PenError("Path not begun") raise PenError("Path not begun")
self._flushContour() self._flushContour()
self.currentContour = None self.currentContour = None
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): def addPoint(
if self.currentContour is None: self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
raise PenError("Path not begun") ):
if identifier is not None: if self.currentContour is None:
kwargs["identifier"] = identifier raise PenError("Path not begun")
self.currentContour.append((pt, segmentType, smooth, name, kwargs)) if identifier is not None:
kwargs["identifier"] = identifier
self.currentContour.append((pt, segmentType, smooth, name, kwargs))
def addComponent(self, glyphName, transform, identifier=None, **kwargs): def addComponent(self, glyphName, transform, identifier=None, **kwargs):
if self.currentContour is not None: if self.currentContour is not None:
raise PenError("Components must be added before or after contours") raise PenError("Components must be added before or after contours")
self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs) self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)

View File

@ -5,25 +5,25 @@ __all__ = ["QtPen"]
class QtPen(BasePen): class QtPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from PyQt5.QtGui import QPainterPath
def __init__(self, glyphSet, path=None): path = QPainterPath()
BasePen.__init__(self, glyphSet) self.path = path
if path is None:
from PyQt5.QtGui import QPainterPath
path = QPainterPath()
self.path = path
def _moveTo(self, p): def _moveTo(self, p):
self.path.moveTo(*p) self.path.moveTo(*p)
def _lineTo(self, p): def _lineTo(self, p):
self.path.lineTo(*p) self.path.lineTo(*p)
def _curveToOne(self, p1, p2, p3): def _curveToOne(self, p1, p2, p3):
self.path.cubicTo(*p1, *p2, *p3) self.path.cubicTo(*p1, *p2, *p3)
def _qCurveToOne(self, p1, p2): def _qCurveToOne(self, p1, p2):
self.path.quadTo(*p1, *p2) self.path.quadTo(*p1, *p2)
def _closePath(self): def _closePath(self):
self.path.closeSubpath() self.path.closeSubpath()
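A quick sketch of the pen in use, assuming PyQt5 is installed (the glyphSet is only needed when drawing components):

from PyQt5.QtGui import QPainterPath
from fontTools.pens.qtPen import QtPen

path = QPainterPath()
pen = QtPen(glyphSet=None, path=path)
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.curveTo((150, 50), (150, 100), (100, 150))
pen.closePath()
# 'path' can now be rendered with QPainter.drawPath(path)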

View File

@ -0,0 +1,105 @@
# Copyright 2016 Google Inc. All Rights Reserved.
# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fontTools.qu2cu import quadratic_to_curves
from fontTools.pens.filterPen import ContourFilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen
import math
class Qu2CuPen(ContourFilterPen):
"""A filter pen to convert quadratic bezier splines to cubic curves
using the FontTools SegmentPen protocol.
Args:
other_pen: another SegmentPen used to draw the transformed outline.
max_err: maximum approximation error in font units. For optimal results,
if you know the UPEM of the font, we recommend setting this to a
value equal to, or close to, UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point.
stats: a dictionary counting the point numbers of cubic segments.
"""
def __init__(
self,
other_pen,
max_err,
all_cubic=False,
reverse_direction=False,
stats=None,
):
if reverse_direction:
other_pen = ReverseContourPen(other_pen)
super().__init__(other_pen)
self.all_cubic = all_cubic
self.max_err = max_err
self.stats = stats
def _quadratics_to_curve(self, q):
curves = quadratic_to_curves(q, self.max_err, all_cubic=self.all_cubic)
if self.stats is not None:
for curve in curves:
n = str(len(curve) - 2)
self.stats[n] = self.stats.get(n, 0) + 1
for curve in curves:
if len(curve) == 4:
yield ("curveTo", curve[1:])
else:
yield ("qCurveTo", curve[1:])
def filterContour(self, contour):
quadratics = []
currentPt = None
newContour = []
for op, args in contour:
if op == "qCurveTo" and (
self.all_cubic or (len(args) > 2 and args[-1] is not None)
):
if args[-1] is None:
raise NotImplementedError(
"oncurve-less contours with all_cubic not implemented"
)
quadratics.append((currentPt,) + args)
else:
if quadratics:
newContour.extend(self._quadratics_to_curve(quadratics))
quadratics = []
newContour.append((op, args))
currentPt = args[-1] if args else None
if quadratics:
newContour.extend(self._quadratics_to_curve(quadratics))
if not self.all_cubic:
# Add back implicit oncurve points
contour = newContour
newContour = []
for op, args in contour:
if op == "qCurveTo" and newContour and newContour[-1][0] == "qCurveTo":
pt0 = newContour[-1][1][-2]
pt1 = newContour[-1][1][-1]
pt2 = args[0]
if (
pt1 is not None
and math.isclose(pt2[0] - pt1[0], pt1[0] - pt0[0])
and math.isclose(pt2[1] - pt1[1], pt1[1] - pt0[1])
):
newArgs = newContour[-1][1][:-1] + args
newContour[-1] = (op, newArgs)
continue
newContour.append((op, args))
return newContour
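A hedged usage sketch for the new pen, converting a quadratic (TrueType) glyph to cubics and recording the result. The font path and glyph name are hypothetical, and the module is assumed to be importable as fontTools.pens.qu2cuPen:

from fontTools.ttLib import TTFont
from fontTools.pens.qu2cuPen import Qu2CuPen
from fontTools.pens.recordingPen import RecordingPen

font = TTFont("MyFont.ttf")                 # hypothetical TrueType font
glyphSet = font.getGlyphSet()
rpen = RecordingPen()
stats = {}
pen = Qu2CuPen(rpen, max_err=font["head"].unitsPerEm / 1000, stats=stats)
glyphSet["a"].draw(pen)                     # hypothetical glyph name
print(rpen.value)                           # mix of curveTo/qCurveTo segments
print(stats)                                # per-segment point-count tallies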

View File

@ -10,36 +10,35 @@ __all__ = ["QuartzPen"]
class QuartzPen(BasePen): class QuartzPen(BasePen):
"""A pen that creates a CGPath """A pen that creates a CGPath
Parameters Parameters
- path: an optional CGPath to add to - path: an optional CGPath to add to
- xform: an optional CGAffineTransform to apply to the path - xform: an optional CGAffineTransform to apply to the path
""" """
def __init__(self, glyphSet, path=None, xform=None): def __init__(self, glyphSet, path=None, xform=None):
BasePen.__init__(self, glyphSet) BasePen.__init__(self, glyphSet)
if path is None: if path is None:
path = CGPathCreateMutable() path = CGPathCreateMutable()
self.path = path self.path = path
self.xform = xform self.xform = xform
def _moveTo(self, pt): def _moveTo(self, pt):
x, y = pt x, y = pt
CGPathMoveToPoint(self.path, self.xform, x, y) CGPathMoveToPoint(self.path, self.xform, x, y)
def _lineTo(self, pt): def _lineTo(self, pt):
x, y = pt x, y = pt
CGPathAddLineToPoint(self.path, self.xform, x, y) CGPathAddLineToPoint(self.path, self.xform, x, y)
def _curveToOne(self, p1, p2, p3): def _curveToOne(self, p1, p2, p3):
(x1, y1), (x2, y2), (x3, y3) = p1, p2, p3 (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3) CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)
def _qCurveToOne(self, p1, p2): def _qCurveToOne(self, p1, p2):
(x1, y1), (x2, y2) = p1, p2 (x1, y1), (x2, y2) = p1, p2
CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2) CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)
def _closePath(self):
CGPathCloseSubpath(self.path)
def _closePath(self):
CGPathCloseSubpath(self.path)

View File

@ -4,152 +4,176 @@ from fontTools.pens.pointPen import AbstractPointPen
__all__ = [ __all__ = [
"replayRecording", "replayRecording",
"RecordingPen", "RecordingPen",
"DecomposingRecordingPen", "DecomposingRecordingPen",
"RecordingPointPen", "RecordingPointPen",
] ]
def replayRecording(recording, pen): def replayRecording(recording, pen):
"""Replay a recording, as produced by RecordingPen or DecomposingRecordingPen, """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
to a pen. to a pen.
Note that recording does not have to be produced by those pens. Note that recording does not have to be produced by those pens.
It can be any iterable of tuples of method name and tuple-of-arguments. It can be any iterable of tuples of method name and tuple-of-arguments.
Likewise, pen can be any object receiving those method calls. Likewise, pen can be any object receiving those method calls.
""" """
for operator,operands in recording: for operator, operands in recording:
getattr(pen, operator)(*operands) getattr(pen, operator)(*operands)
class RecordingPen(AbstractPen): class RecordingPen(AbstractPen):
"""Pen recording operations that can be accessed or replayed. """Pen recording operations that can be accessed or replayed.
The recording can be accessed as pen.value; or replayed using The recording can be accessed as pen.value; or replayed using
pen.replay(otherPen). pen.replay(otherPen).
:Example: :Example:
from fontTools.ttLib import TTFont from fontTools.ttLib import TTFont
from fontTools.pens.recordingPen import RecordingPen from fontTools.pens.recordingPen import RecordingPen
glyph_name = 'dollar' glyph_name = 'dollar'
font_path = 'MyFont.otf' font_path = 'MyFont.otf'
font = TTFont(font_path) font = TTFont(font_path)
glyphset = font.getGlyphSet() glyphset = font.getGlyphSet()
glyph = glyphset[glyph_name] glyph = glyphset[glyph_name]
pen = RecordingPen() pen = RecordingPen()
glyph.draw(pen) glyph.draw(pen)
print(pen.value) print(pen.value)
""" """
def __init__(self): def __init__(self):
self.value = [] self.value = []
def moveTo(self, p0):
self.value.append(('moveTo', (p0,))) def moveTo(self, p0):
def lineTo(self, p1): self.value.append(("moveTo", (p0,)))
self.value.append(('lineTo', (p1,)))
def qCurveTo(self, *points): def lineTo(self, p1):
self.value.append(('qCurveTo', points)) self.value.append(("lineTo", (p1,)))
def curveTo(self, *points):
self.value.append(('curveTo', points)) def qCurveTo(self, *points):
def closePath(self): self.value.append(("qCurveTo", points))
self.value.append(('closePath', ()))
def endPath(self): def curveTo(self, *points):
self.value.append(('endPath', ())) self.value.append(("curveTo", points))
def addComponent(self, glyphName, transformation):
self.value.append(('addComponent', (glyphName, transformation))) def closePath(self):
def replay(self, pen): self.value.append(("closePath", ()))
replayRecording(self.value, pen)
def endPath(self):
self.value.append(("endPath", ()))
def addComponent(self, glyphName, transformation):
self.value.append(("addComponent", (glyphName, transformation)))
def addVarComponent(self, glyphName, transformation, location):
self.value.append(("addVarComponent", (glyphName, transformation, location)))
def replay(self, pen):
replayRecording(self.value, pen)
class DecomposingRecordingPen(DecomposingPen, RecordingPen): class DecomposingRecordingPen(DecomposingPen, RecordingPen):
""" Same as RecordingPen, except that it doesn't keep components """Same as RecordingPen, except that it doesn't keep components
as references, but draws them decomposed as regular contours. as references, but draws them decomposed as regular contours.
The constructor takes a single 'glyphSet' positional argument, The constructor takes a single 'glyphSet' positional argument,
a dictionary of glyph objects (i.e. with a 'draw' method) keyed a dictionary of glyph objects (i.e. with a 'draw' method) keyed
by their name:: by their name::
>>> class SimpleGlyph(object): >>> class SimpleGlyph(object):
... def draw(self, pen): ... def draw(self, pen):
... pen.moveTo((0, 0)) ... pen.moveTo((0, 0))
... pen.curveTo((1, 1), (2, 2), (3, 3)) ... pen.curveTo((1, 1), (2, 2), (3, 3))
... pen.closePath() ... pen.closePath()
>>> class CompositeGlyph(object): >>> class CompositeGlyph(object):
... def draw(self, pen): ... def draw(self, pen):
... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) ... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
>>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()} >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
>>> for name, glyph in sorted(glyphSet.items()): >>> for name, glyph in sorted(glyphSet.items()):
... pen = DecomposingRecordingPen(glyphSet) ... pen = DecomposingRecordingPen(glyphSet)
... glyph.draw(pen) ... glyph.draw(pen)
... print("{}: {}".format(name, pen.value)) ... print("{}: {}".format(name, pen.value))
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
""" """
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False # raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
class RecordingPointPen(AbstractPointPen): class RecordingPointPen(AbstractPointPen):
"""PointPen recording operations that can be accessed or replayed. """PointPen recording operations that can be accessed or replayed.
The recording can be accessed as pen.value; or replayed using The recording can be accessed as pen.value; or replayed using
pointPen.replay(otherPointPen). pointPen.replay(otherPointPen).
:Example: :Example:
from defcon import Font from defcon import Font
from fontTools.pens.recordingPen import RecordingPointPen from fontTools.pens.recordingPen import RecordingPointPen
glyph_name = 'a' glyph_name = 'a'
font_path = 'MyFont.ufo' font_path = 'MyFont.ufo'
font = Font(font_path) font = Font(font_path)
glyph = font[glyph_name] glyph = font[glyph_name]
pen = RecordingPointPen() pen = RecordingPointPen()
glyph.drawPoints(pen) glyph.drawPoints(pen)
print(pen.value) print(pen.value)
new_glyph = font.newGlyph('b') new_glyph = font.newGlyph('b')
pen.replay(new_glyph.getPointPen()) pen.replay(new_glyph.getPointPen())
""" """
def __init__(self): def __init__(self):
self.value = [] self.value = []
def beginPath(self, identifier=None, **kwargs): def beginPath(self, identifier=None, **kwargs):
if identifier is not None: if identifier is not None:
kwargs["identifier"] = identifier kwargs["identifier"] = identifier
self.value.append(("beginPath", (), kwargs)) self.value.append(("beginPath", (), kwargs))
def endPath(self): def endPath(self):
self.value.append(("endPath", (), {})) self.value.append(("endPath", (), {}))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): def addPoint(
if identifier is not None: self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
kwargs["identifier"] = identifier ):
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs)) if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
if identifier is not None: if identifier is not None:
kwargs["identifier"] = identifier kwargs["identifier"] = identifier
self.value.append(("addComponent", (baseGlyphName, transformation), kwargs)) self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
def replay(self, pointPen): def addVarComponent(
for operator, args, kwargs in self.value: self, baseGlyphName, transformation, location, identifier=None, **kwargs
getattr(pointPen, operator)(*args, **kwargs) ):
if identifier is not None:
kwargs["identifier"] = identifier
self.value.append(
("addVarComponent", (baseGlyphName, transformation, location), kwargs)
)
def replay(self, pointPen):
for operator, args, kwargs in self.value:
getattr(pointPen, operator)(*args, **kwargs)
if __name__ == "__main__": if __name__ == "__main__":
pen = RecordingPen() pen = RecordingPen()
pen.moveTo((0, 0)) pen.moveTo((0, 0))
pen.lineTo((0, 100)) pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25)) pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath() pen.closePath()
from pprint import pprint from pprint import pprint
pprint(pen.value)
pprint(pen.value)

View File

@ -7,67 +7,74 @@ __all__ = ["ReportLabPen"]
class ReportLabPen(BasePen): class ReportLabPen(BasePen):
"""A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object.""" """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""
def __init__(self, glyphSet, path=None): def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet) BasePen.__init__(self, glyphSet)
if path is None: if path is None:
path = Path() path = Path()
self.path = path self.path = path
def _moveTo(self, p): def _moveTo(self, p):
(x,y) = p (x, y) = p
self.path.moveTo(x,y) self.path.moveTo(x, y)
def _lineTo(self, p): def _lineTo(self, p):
(x,y) = p (x, y) = p
self.path.lineTo(x,y) self.path.lineTo(x, y)
def _curveToOne(self, p1, p2, p3): def _curveToOne(self, p1, p2, p3):
(x1,y1) = p1 (x1, y1) = p1
(x2,y2) = p2 (x2, y2) = p2
(x3,y3) = p3 (x3, y3) = p3
self.path.curveTo(x1, y1, x2, y2, x3, y3) self.path.curveTo(x1, y1, x2, y2, x3, y3)
def _closePath(self): def _closePath(self):
self.path.closePath() self.path.closePath()
if __name__=="__main__": if __name__ == "__main__":
import sys import sys
if len(sys.argv) < 3:
print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
print(" If no image file name is created, by default <glyphname>.png is created.")
print(" example: reportLabPen.py Arial.TTF R test.png")
print(" (The file format will be PNG, regardless of the image file name supplied)")
sys.exit(0)
from fontTools.ttLib import TTFont if len(sys.argv) < 3:
from reportlab.lib import colors print(
"Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]"
)
print(
" If no image file name is created, by default <glyphname>.png is created."
)
print(" example: reportLabPen.py Arial.TTF R test.png")
print(
" (The file format will be PNG, regardless of the image file name supplied)"
)
sys.exit(0)
path = sys.argv[1] from fontTools.ttLib import TTFont
glyphName = sys.argv[2] from reportlab.lib import colors
if (len(sys.argv) > 3):
imageFile = sys.argv[3]
else:
imageFile = "%s.png" % glyphName
font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font path = sys.argv[1]
gs = font.getGlyphSet() glyphName = sys.argv[2]
pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) if len(sys.argv) > 3:
g = gs[glyphName] imageFile = sys.argv[3]
g.draw(pen) else:
imageFile = "%s.png" % glyphName
w, h = g.width, 1000 font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
from reportlab.graphics import renderPM gs = font.getGlyphSet()
from reportlab.graphics.shapes import Group, Drawing, scale pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
g = gs[glyphName]
g.draw(pen)
# Everything is wrapped in a group to allow transformations. w, h = g.width, 1000
g = Group(pen.path) from reportlab.graphics import renderPM
g.translate(0, 200) from reportlab.graphics.shapes import Group, Drawing, scale
g.scale(0.3, 0.3)
d = Drawing(w, h) # Everything is wrapped in a group to allow transformations.
d.add(g) g = Group(pen.path)
g.translate(0, 200)
g.scale(0.3, 0.3)
renderPM.drawToFile(d, imageFile, fmt="PNG") d = Drawing(w, h)
d.add(g)
renderPM.drawToFile(d, imageFile, fmt="PNG")

View File

@ -14,12 +14,16 @@ class ReverseContourPen(ContourFilterPen):
the first point. the first point.
""" """
def __init__(self, outPen, outputImpliedClosingLine=False):
super().__init__(outPen)
self.outputImpliedClosingLine = outputImpliedClosingLine
def filterContour(self, contour): def filterContour(self, contour):
return reversedContour(contour) return reversedContour(contour, self.outputImpliedClosingLine)
def reversedContour(contour): def reversedContour(contour, outputImpliedClosingLine=False):
""" Generator that takes a list of pen's (operator, operands) tuples, """Generator that takes a list of pen's (operator, operands) tuples,
and yields them with the winding direction reversed. and yields them with the winding direction reversed.
""" """
if not contour: if not contour:
@ -36,16 +40,14 @@ def reversedContour(contour):
firstType, firstPts = contour.pop(0) firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), ( assert firstType in ("moveTo", "qCurveTo"), (
"invalid initial segment type: %r" % firstType) "invalid initial segment type: %r" % firstType
)
firstOnCurve = firstPts[-1] firstOnCurve = firstPts[-1]
if firstType == "qCurveTo": if firstType == "qCurveTo":
# special case for TrueType paths containing only off-curve points # special case for TrueType paths containing only off-curve points
assert firstOnCurve is None, ( assert firstOnCurve is None, "off-curve only paths must end with 'None'"
"off-curve only paths must end with 'None'") assert not contour, "only one qCurveTo allowed per off-curve path"
assert not contour, ( firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)
"only one qCurveTo allowed per off-curve path")
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
(None,))
if not contour: if not contour:
# contour contains only one segment, nothing to reverse # contour contains only one segment, nothing to reverse
@ -63,23 +65,23 @@ def reversedContour(contour):
if firstOnCurve != lastOnCurve: if firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points # emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,) yield "lineTo", (lastOnCurve,)
contour[-1] = (lastType, contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1: if len(contour) > 1:
secondType, secondPts = contour[0] secondType, secondPts = contour[0]
else: else:
# contour has only two points, the second and last are the same # contour has only two points, the second and last are the same
secondType, secondPts = lastType, lastPts secondType, secondPts = lastType, lastPts
# if a lineTo follows the initial moveTo, after reversing it
# will be implied by the closePath, so we don't emit one; if not outputImpliedClosingLine:
# unless the lineTo and moveTo overlap, in which case we keep the # if a lineTo follows the initial moveTo, after reversing it
# duplicate points # will be implied by the closePath, so we don't emit one;
if secondType == "lineTo" and firstPts != secondPts: # unless the lineTo and moveTo overlap, in which case we keep the
del contour[0] # duplicate points
if contour: if secondType == "lineTo" and firstPts != secondPts:
contour[-1] = (lastType, del contour[0]
tuple(lastPts[:-1]) + secondPts) if contour:
contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
else: else:
# for open paths, the last point will become the first # for open paths, the last point will become the first
yield firstType, (lastOnCurve,) yield firstType, (lastOnCurve,)
@ -88,8 +90,7 @@ def reversedContour(contour):
# we iterate over all segment pairs in reverse order, and yield # we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and # each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment # with the on-curve point of the following segment
for (curType, curPts), (_, nextPts) in pairwise( for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],) yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", () yield "closePath" if closed else "endPath", ()

View File

@ -8,95 +8,115 @@ __all__ = ["StatisticsPen"]
class StatisticsPen(MomentsPen): class StatisticsPen(MomentsPen):
"""Pen calculating area, center of mass, variance and """Pen calculating area, center of mass, variance and
standard-deviation, covariance and correlation, and slant, standard-deviation, covariance and correlation, and slant,
of glyph shapes. of glyph shapes.
Note that all the calculated values are 'signed'. Ie. if the Note that all the calculated values are 'signed'. Ie. if the
glyph shape is self-intersecting, the values are not correct glyph shape is self-intersecting, the values are not correct
(but well-defined). As such, area will be negative if contour (but well-defined). As such, area will be negative if contour
directions are clockwise. Moreover, variance might be negative directions are clockwise. Moreover, variance might be negative
if the shapes are self-intersecting in certain ways.""" if the shapes are self-intersecting in certain ways."""
def __init__(self, glyphset=None): def __init__(self, glyphset=None):
MomentsPen.__init__(self, glyphset=glyphset) MomentsPen.__init__(self, glyphset=glyphset)
self.__zero() self.__zero()
def _closePath(self): def _closePath(self):
MomentsPen._closePath(self) MomentsPen._closePath(self)
self.__update() self.__update()
def __zero(self): def __zero(self):
self.meanX = 0 self.meanX = 0
self.meanY = 0 self.meanY = 0
self.varianceX = 0 self.varianceX = 0
self.varianceY = 0 self.varianceY = 0
self.stddevX = 0 self.stddevX = 0
self.stddevY = 0 self.stddevY = 0
self.covariance = 0 self.covariance = 0
self.correlation = 0 self.correlation = 0
self.slant = 0 self.slant = 0
def __update(self): def __update(self):
area = self.area area = self.area
if not area: if not area:
self.__zero() self.__zero()
return return
# Center of mass # Center of mass
# https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
self.meanX = meanX = self.momentX / area self.meanX = meanX = self.momentX / area
self.meanY = meanY = self.momentY / area self.meanY = meanY = self.momentY / area
# Var(X) = E[X^2] - E[X]^2 # Var(X) = E[X^2] - E[X]^2
self.varianceX = varianceX = self.momentXX / area - meanX**2 self.varianceX = varianceX = self.momentXX / area - meanX**2
self.varianceY = varianceY = self.momentYY / area - meanY**2 self.varianceY = varianceY = self.momentYY / area - meanY**2
self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX) self.stddevX = stddevX = math.copysign(abs(varianceX) ** 0.5, varianceX)
self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY) self.stddevY = stddevY = math.copysign(abs(varianceY) ** 0.5, varianceY)
# Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] ) # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] )
self.covariance = covariance = self.momentXY / area - meanX*meanY self.covariance = covariance = self.momentXY / area - meanX * meanY
# Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) ) # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) )
# https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
if stddevX * stddevY == 0: if stddevX * stddevY == 0:
correlation = float("NaN") correlation = float("NaN")
else: else:
correlation = covariance / (stddevX * stddevY) correlation = covariance / (stddevX * stddevY)
self.correlation = correlation if abs(correlation) > 1e-3 else 0 self.correlation = correlation if abs(correlation) > 1e-3 else 0
slant = covariance / varianceY if varianceY != 0 else float("NaN") slant = covariance / varianceY if varianceY != 0 else float("NaN")
self.slant = slant if abs(slant) > 1e-3 else 0 self.slant = slant if abs(slant) > 1e-3 else 0
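For intuition, a tiny self-contained example: a counter-clockwise 100x100 square has its center of mass at (50, 50), zero covariance between x and y, and therefore zero slant (a sketch; exact float formatting may differ):

from fontTools.pens.statisticsPen import StatisticsPen

pen = StatisticsPen()
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.lineTo((0, 100))
pen.closePath()
print(pen.area)              # ~10000, positive for a counter-clockwise contour
print(pen.meanX, pen.meanY)  # ~50.0 ~50.0
print(pen.slant)             # 0 -- covariance(x, y) is zero for a square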
def _test(glyphset, upem, glyphs): def _test(glyphset, upem, glyphs):
from fontTools.pens.transformPen import TransformPen from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Scale from fontTools.misc.transform import Scale
print('upem', upem) print("upem", upem)
for glyph_name in glyphs:
print()
print("glyph:", glyph_name)
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
transformer = TransformPen(pen, Scale(1.0 / upem))
glyph.draw(transformer)
for item in [
"area",
"momentX",
"momentY",
"momentXX",
"momentYY",
"momentXY",
"meanX",
"meanY",
"varianceX",
"varianceY",
"stddevX",
"stddevY",
"covariance",
"correlation",
"slant",
]:
print("%s: %g" % (item, getattr(pen, item)))
for glyph_name in glyphs:
print()
print("glyph:", glyph_name)
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
transformer = TransformPen(pen, Scale(1./upem))
glyph.draw(transformer)
for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']:
print ("%s: %g" % (item, getattr(pen, item)))
def main(args): def main(args):
if not args: if not args:
return return
filename, glyphs = args[0], args[1:] filename, glyphs = args[0], args[1:]
from fontTools.ttLib import TTFont from fontTools.ttLib import TTFont
font = TTFont(filename)
if not glyphs:
glyphs = font.getGlyphOrder()
_test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs)
if __name__ == '__main__': font = TTFont(filename)
import sys if not glyphs:
main(sys.argv[1:]) glyphs = font.getGlyphOrder()
_test(font.getGlyphSet(), font["head"].unitsPerEm, glyphs)
if __name__ == "__main__":
import sys
main(sys.argv[1:])

View File

@@ -7,7 +7,7 @@ def pointToString(pt, ntos=str):


class SVGPathPen(BasePen):
    """Pen to draw SVG path d commands.

    Example::
        >>> pen = SVGPathPen(None)

@@ -36,6 +36,7 @@ class SVGPathPen(BasePen):
        glyphset[glyphname].draw(pen)
        print(tpen.getCommands())
    """

    def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
        BasePen.__init__(self, glyphSet)
        self._commands = []

@@ -209,22 +210,25 @@ def main(args=None):
    if args is None:
        import sys

        args = sys.argv[1:]

    from fontTools.ttLib import TTFont
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools pens.svgPathPen", description="Generate SVG from text"
    )
    parser.add_argument("font", metavar="font.ttf", help="Font file.")
    parser.add_argument("text", metavar="text", help="Text string.")
    parser.add_argument(
        "--variations",
        metavar="AXIS=LOC",
        default="",
        help="List of space separated locations. A location consists of "
        "the name of a variation axis, followed by '=' and a number. E.g.: "
        "wght=700 wdth=80. The default is the location of the base master.",
    )

    options = parser.parse_args(args)

@@ -233,18 +237,18 @@ def main(args=None):
    location = {}
    for tag_v in options.variations.split():
        fields = tag_v.split("=")
        tag = fields[0].strip()
        v = int(fields[1])
        location[tag] = v

    hhea = font["hhea"]
    ascent, descent = hhea.ascent, hhea.descent

    glyphset = font.getGlyphSet(location=location)
    cmap = font["cmap"].getBestCmap()

    s = ""
    width = 0
    for u in text:
        g = cmap[ord(u)]

@@ -254,20 +258,29 @@ def main(args=None):
        glyph.draw(pen)
        commands = pen.getCommands()
        s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % (
            width,
            ascent,
            commands,
        )
        width += glyph.width

    print('<?xml version="1.0" encoding="UTF-8"?>')
    print(
        '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">'
        % (width, ascent - descent)
    )
    print(s, end="")
    print("</svg>")


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        import doctest

        sys.exit(doctest.testmod().failed)
    sys.exit(main())
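A minimal usage sketch for the command-line entry point above (the font path and text here are placeholders, not taken from the diff): it renders the string at the requested variable-font location and prints an SVG document to stdout.

    from fontTools.pens.svgPathPen import main

    main(["MyFont.ttf", "Hello", "--variations", "wght=700 wdth=80"])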
Lib/fontTools/pens/t2CharStringPen.py
@@ -24,22 +24,22 @@ class T2CharStringPen(BasePen):
        self._CFF2 = CFF2
        self._width = width
        self._commands = []
        self._p0 = (0, 0)

    def _p(self, pt):
        p0 = self._p0
        pt = self._p0 = (self.round(pt[0]), self.round(pt[1]))
        return [pt[0] - p0[0], pt[1] - p0[1]]

    def _moveTo(self, pt):
        self._commands.append(("rmoveto", self._p(pt)))

    def _lineTo(self, pt):
        self._commands.append(("rlineto", self._p(pt)))

    def _curveToOne(self, pt1, pt2, pt3):
        _p = self._p
        self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3)))

    def _closePath(self):
        pass

@@ -51,15 +51,18 @@ class T2CharStringPen(BasePen):
        commands = self._commands
        if optimize:
            maxstack = 48 if not self._CFF2 else 513
            commands = specializeCommands(
                commands, generalizeFirst=False, maxstack=maxstack
            )
        program = commandsToProgram(commands)
        if self._width is not None:
            assert (
                not self._CFF2
            ), "CFF2 does not allow encoding glyph width in CharString."
            program.insert(0, otRound(self._width))
        if not self._CFF2:
            program.append("endchar")
        charString = T2CharString(
            program=program, private=private, globalSubrs=globalSubrs
        )
        return charString
Lib/fontTools/pens/teePen.py
@@ -6,41 +6,49 @@ __all__ = ["TeePen"]


class TeePen(AbstractPen):
    """Pen multiplexing drawing to one or more pens.

    Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""

    def __init__(self, *pens):
        if len(pens) == 1:
            pens = pens[0]
        self.pens = pens

    def moveTo(self, p0):
        for pen in self.pens:
            pen.moveTo(p0)

    def lineTo(self, p1):
        for pen in self.pens:
            pen.lineTo(p1)

    def qCurveTo(self, *points):
        for pen in self.pens:
            pen.qCurveTo(*points)

    def curveTo(self, *points):
        for pen in self.pens:
            pen.curveTo(*points)

    def closePath(self):
        for pen in self.pens:
            pen.closePath()

    def endPath(self):
        for pen in self.pens:
            pen.endPath()

    def addComponent(self, glyphName, transformation):
        for pen in self.pens:
            pen.addComponent(glyphName, transformation)


if __name__ == "__main__":
    from fontTools.pens.basePen import _TestPen

    pen = TeePen(_TestPen(), _TestPen())
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25))
    pen.closePath()
Lib/fontTools/pens/transformPen.py
@@ -6,103 +6,106 @@ __all__ = ["TransformPen", "TransformPointPen"]


class TransformPen(FilterPen):
    """Pen that transforms all coordinates using an Affine transformation,
    and passes them to another pen.
    """

    def __init__(self, outPen, transformation):
        """The 'outPen' argument is another pen object. It will receive the
        transformed coordinates. The 'transformation' argument can either
        be a six-tuple, or a fontTools.misc.transform.Transform object.
        """
        super(TransformPen, self).__init__(outPen)
        if not hasattr(transformation, "transformPoint"):
            from fontTools.misc.transform import Transform

            transformation = Transform(*transformation)
        self._transformation = transformation
        self._transformPoint = transformation.transformPoint
        self._stack = []

    def moveTo(self, pt):
        self._outPen.moveTo(self._transformPoint(pt))

    def lineTo(self, pt):
        self._outPen.lineTo(self._transformPoint(pt))

    def curveTo(self, *points):
        self._outPen.curveTo(*self._transformPoints(points))

    def qCurveTo(self, *points):
        if points[-1] is None:
            points = self._transformPoints(points[:-1]) + [None]
        else:
            points = self._transformPoints(points)
        self._outPen.qCurveTo(*points)

    def _transformPoints(self, points):
        transformPoint = self._transformPoint
        return [transformPoint(pt) for pt in points]

    def closePath(self):
        self._outPen.closePath()

    def endPath(self):
        self._outPen.endPath()

    def addComponent(self, glyphName, transformation):
        transformation = self._transformation.transform(transformation)
        self._outPen.addComponent(glyphName, transformation)
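
# Usage sketch, mirroring the TransformPointPen doctest below: a six-tuple is
# promoted to a Transform, so (2, 0, 0, 2, -10, 5) maps (100, 100) to (190, 205).
#
#     >>> from fontTools.pens.recordingPen import RecordingPen
#     >>> rec = RecordingPen()
#     >>> pen = TransformPen(rec, (2, 0, 0, 2, -10, 5))
#     >>> pen.moveTo((100, 100))
#     >>> rec.value
#     [('moveTo', ((190, 205),))]
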
class TransformPointPen(FilterPointPen):
    """PointPen that transforms all coordinates using an Affine transformation,
    and passes them to another PointPen.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> rec = RecordingPointPen()
    >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
    >>> v = iter(rec.value)
    >>> pen.beginPath(identifier="contour-0")
    >>> next(v)
    ('beginPath', (), {'identifier': 'contour-0'})
    >>> pen.addPoint((100, 100), "line")
    >>> next(v)
    ('addPoint', ((190, 205), 'line', False, None), {})
    >>> pen.endPath()
    >>> next(v)
    ('endPath', (), {})
    >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
    >>> next(v)
    ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
    """

    def __init__(self, outPointPen, transformation):
        """The 'outPointPen' argument is another point pen object.
        It will receive the transformed coordinates.
        The 'transformation' argument can either be a six-tuple, or a
        fontTools.misc.transform.Transform object.
        """
        super().__init__(outPointPen)
        if not hasattr(transformation, "transformPoint"):
            from fontTools.misc.transform import Transform

            transformation = Transform(*transformation)
        self._transformation = transformation
        self._transformPoint = transformation.transformPoint

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self._outPen.addPoint(
            self._transformPoint(pt), segmentType, smooth, name, **kwargs
        )

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        transformation = self._transformation.transform(transformation)
        self._outPen.addComponent(baseGlyphName, transformation, **kwargs)


if __name__ == "__main__":
    from fontTools.pens.basePen import _TestPen

    pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
    pen.closePath()
Lib/fontTools/pens/ttGlyphPen.py
@@ -7,14 +7,60 @@ from fontTools.misc.roundTools import otRound
from fontTools.pens.basePen import LoggingPen, PenError
from fontTools.pens.transformPen import TransformPen, TransformPointPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import flagOnCurve, flagCubic
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
import math


__all__ = ["TTGlyphPen", "TTGlyphPointPen"]


def drop_implied_oncurves(glyph):
    drop = set()
    start = 0
    flags = glyph.flags
    coords = glyph.coordinates
    for last in glyph.endPtsOfContours:
        for i in range(start, last + 1):
            if not (flags[i] & flagOnCurve):
                continue
            prv = i - 1 if i > start else last
            nxt = i + 1 if i < last else start
            if (flags[prv] & flagOnCurve) or flags[prv] != flags[nxt]:
                continue
            p0 = coords[prv]
            p1 = coords[i]
            p2 = coords[nxt]
            if not math.isclose(p1[0] - p0[0], p2[0] - p1[0]) or not math.isclose(
                p1[1] - p0[1], p2[1] - p1[1]
            ):
                continue

            drop.add(i)
    if drop:
        # Do the actual dropping
        glyph.coordinates = GlyphCoordinates(
            coords[i] for i in range(len(coords)) if i not in drop
        )
        glyph.flags = array("B", (flags[i] for i in range(len(flags)) if i not in drop))

        endPts = glyph.endPtsOfContours
        newEndPts = []
        i = 0
        delta = 0
        for d in sorted(drop):
            while d > endPts[i]:
                newEndPts.append(endPts[i] - delta)
                i += 1
            delta += 1
        while i < len(endPts):
            newEndPts.append(endPts[i] - delta)
            i += 1
        glyph.endPtsOfContours = newEndPts
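
# Illustration of the test above: an on-curve point is "implied" when it sits
# exactly midway between its two off-curve neighbours and both neighbours carry
# the same flags (both quadratic or both cubic). For example, with off-curves at
# (0, 0) and (100, 100), an on-curve point at (50, 50) adds no information: the
# rasterizer reconstructs it automatically, so it can safely be dropped.
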

class _TTGlyphBasePen:
    def __init__(
        self,

@@ -124,9 +170,14 @@ class _TTGlyphBasePen:
            components.append(component)

        return components

    def glyph(self, componentFlags: int = 0x04, dropImpliedOnCurves=False) -> Glyph:
        """
        Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.

        Args:
            componentFlags: Flags to use for component glyphs. (default: 0x04)
            dropImpliedOnCurves: Whether to remove implied-oncurve points. (default: False)
        """
        if not self._isClosed():
            raise PenError("Didn't close last contour.")

@@ -134,9 +185,13 @@ class _TTGlyphBasePen:
        glyph = Glyph()
        glyph.coordinates = GlyphCoordinates(self.points)
        glyph.endPtsOfContours = self.endPts
        glyph.flags = array("B", self.types)
        glyph.coordinates.toInt()
        if dropImpliedOnCurves:
            drop_implied_oncurves(glyph)

        self.init()

        if components:

@@ -164,9 +219,18 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
    drawMethod = "draw"
    transformPen = TransformPen

    def __init__(
        self,
        glyphSet: Optional[Dict[str, Any]] = None,
        handleOverflowingTransforms: bool = True,
        outputImpliedClosingLine: bool = False,
    ) -> None:
        super().__init__(glyphSet, handleOverflowingTransforms)
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _addPoint(self, pt: Tuple[float, float], tp: int) -> None:
        self.points.append(pt)
        self.types.append(tp)

    def _popPoint(self) -> None:
        self.points.pop()

@@ -178,15 +242,21 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
        )

    def lineTo(self, pt: Tuple[float, float]) -> None:
        self._addPoint(pt, flagOnCurve)

    def moveTo(self, pt: Tuple[float, float]) -> None:
        if not self._isClosed():
            raise PenError('"move"-type point must begin a new contour.')
        self._addPoint(pt, flagOnCurve)

    def curveTo(self, *points) -> None:
        assert len(points) % 2 == 1
        for pt in points[:-1]:
            self._addPoint(pt, flagCubic)

        # last point is None if there are no on-curve points
        if points[-1] is not None:
            self._addPoint(points[-1], 1)

    def qCurveTo(self, *points) -> None:
        assert len(points) >= 1

@@ -205,13 +275,14 @@ class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
            self._popPoint()
            return

        if not self.outputImpliedClosingLine:
            # if first and last point on this path are the same, remove last
            startPt = 0
            if self.endPts:
                startPt = self.endPts[-1] + 1
            if self.points[startPt] == self.points[endPt]:
                self._popPoint()
                endPt -= 1

        self.endPts.append(endPt)

@@ -256,9 +327,23 @@ class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
            raise PenError("Contour is already closed.")
        if self._currentContourStartIndex == len(self.points):
            raise PenError("Tried to end an empty contour.")

        contourStart = self.endPts[-1] + 1 if self.endPts else 0
        self.endPts.append(len(self.points) - 1)
        self._currentContourStartIndex = None

        # Resolve types for any cubic segments
        flags = self.types
        for i in range(contourStart, len(flags)):
            if flags[i] == "curve":
                j = i - 1
                if j < contourStart:
                    j = len(flags) - 1
                while flags[j] == 0:
                    flags[j] = flagCubic
                    j -= 1
                flags[i] = flagOnCurve

    def addPoint(
        self,
        pt: Tuple[float, float],

@@ -274,11 +359,13 @@ class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
        if self._isClosed():
            raise PenError("Can't add a point to a closed contour.")

        if segmentType is None:
            self.types.append(0)
        elif segmentType in ("line", "move"):
            self.types.append(flagOnCurve)
        elif segmentType == "qcurve":
            self.types.append(flagOnCurve)
        elif segmentType == "curve":
            self.types.append("curve")
        else:
            raise AssertionError(segmentType)
Lib/fontTools/pens/wxPen.py
@@ -5,25 +5,25 @@ __all__ = ["WxPen"]


class WxPen(BasePen):
    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            import wx

            path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath()
        self.path = path

    def _moveTo(self, p):
        self.path.MoveToPoint(*p)

    def _lineTo(self, p):
        self.path.AddLineToPoint(*p)

    def _curveToOne(self, p1, p2, p3):
        self.path.AddCurveToPoint(*p1 + p2 + p3)

    def _qCurveToOne(self, p1, p2):
        self.path.AddQuadCurveToPoint(*p1 + p2)

    def _closePath(self):
        self.path.CloseSubpath()
Lib/fontTools/qu2cu/__init__.py (new file)
@@ -0,0 +1,15 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .qu2cu import *
Lib/fontTools/qu2cu/__main__.py (new file)
@@ -0,0 +1,7 @@
import sys
from .cli import main


if __name__ == "__main__":
    sys.exit(main())
Lib/fontTools/qu2cu/benchmark.py (new file)
@@ -0,0 +1,57 @@
"""Benchmark the qu2cu algorithm performance."""

from .qu2cu import *
from fontTools.cu2qu import curve_to_quadratic
import random
import timeit

MAX_ERR = 0.5
NUM_CURVES = 5


def generate_curves(n):
    points = [
        tuple(float(random.randint(0, 2048)) for coord in range(2))
        for point in range(1 + 3 * n)
    ]
    curves = []
    for i in range(n):
        curves.append(tuple(points[i * 3 : i * 3 + 4]))
    return curves


def setup_quadratic_to_curves():
    curves = generate_curves(NUM_CURVES)
    quadratics = [curve_to_quadratic(curve, MAX_ERR) for curve in curves]
    return quadratics, MAX_ERR


def run_benchmark(module, function, setup_suffix="", repeat=25, number=1):
    setup_func = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_func += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    def wrapper(function, setup_func):
        function = globals()[function]
        setup_func = globals()[setup_func]

        def wrapped():
            return function(*setup_func())

        return wrapped

    results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))


def main():
    """Benchmark the qu2cu algorithm performance."""
    run_benchmark("qu2cu", "quadratic_to_curves")


if __name__ == "__main__":
    random.seed(1)
    main()
Lib/fontTools/qu2cu/cli.py (new file, 124 lines)
@@ -0,0 +1,124 @@
import os
import argparse
import logging
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.ttLib import TTFont
from fontTools.pens.qu2cuPen import Qu2CuPen
from fontTools.pens.ttGlyphPen import TTGlyphPen
import fontTools


logger = logging.getLogger("fontTools.qu2cu")


def _font_to_cubic(input_path, output_path=None, **kwargs):
    font = TTFont(input_path)
    logger.info("Converting curves for %s", input_path)

    stats = {} if kwargs["dump_stats"] else None
    qu2cu_kwargs = {
        "stats": stats,
        "max_err": kwargs["max_err_em"] * font["head"].unitsPerEm,
        "all_cubic": kwargs["all_cubic"],
    }

    assert "gvar" not in font, "Cannot convert variable font"
    glyphSet = font.getGlyphSet()
    glyphOrder = font.getGlyphOrder()
    glyf = font["glyf"]
    for glyphName in glyphOrder:
        glyph = glyphSet[glyphName]
        ttpen = TTGlyphPen(glyphSet)
        pen = Qu2CuPen(ttpen, **qu2cu_kwargs)
        glyph.draw(pen)
        glyf[glyphName] = ttpen.glyph(dropImpliedOnCurves=True)

    font["head"].glyphDataFormat = 1

    if kwargs["dump_stats"]:
        logger.info("Stats: %s", stats)

    logger.info("Saving %s", output_path)
    font.save(output_path)


def main(args=None):
    parser = argparse.ArgumentParser(prog="qu2cu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input TTF source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=0.001,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-c",
        "--all-cubic",
        default=False,
        action="store_true",
        help="whether to only use cubic curves",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=("output filename for the converted TTF."),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted TTFs",
    )

    options = parser.parse_args(args)

    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multiple inputs")

    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        output_paths = [
            makeOutputFileName(p, overWrite=True, suffix=".cubic")
            for p in options.infiles
        ]

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        all_cubic=options.all_cubic,
    )

    for input_path, output_path in zip(options.infiles, output_paths):
        _font_to_cubic(input_path, output_path, **kwargs)
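A minimal invocation sketch for the converter above (the font name is a placeholder); together with the package's __main__.py it can be driven from Python or as a module, and without -o/-d the result is written next to the input with a ".cubic" suffix via makeOutputFileName.

    from fontTools.qu2cu.cli import main

    main(["--conversion-error", "0.001", "MyFont.ttf"])
    # roughly equivalent to: python -m fontTools.qu2cu -e 0.001 MyFont.ttf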
Lib/fontTools/qu2cu/qu2cu.py (new file)
@@ -0,0 +1,409 @@
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1

# Copyright 2023 Google Inc. All Rights Reserved.
# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import cython

    COMPILED = cython.compiled
except (AttributeError, ImportError):
    # if cython not installed, use mock module with no-op decorators and types
    from fontTools.misc import cython

    COMPILED = False

from fontTools.misc.bezierTools import splitCubicAtTC
from collections import namedtuple
import math
from typing import (
    List,
    Tuple,
    Union,
)


__all__ = ["quadratic_to_curves"]


# Copied from cu2qu
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that no
    checks are made on the start and end positions of the curve; this function
    only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # First check p2 then p1, as p2 has higher error early on.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
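
# How this is used further down: the caller subtracts an original segment from
# its reconstruction point by point and asks whether that *difference* curve
# stays within `tolerance` of the origin; a perfect match is the zero curve and
# trivially fits, e.g. cubic_farthest_fit_inside(0j, 0j, 0j, 0j, 0.5) is True.
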
@cython.locals(_1_3=cython.double, _2_3=cython.double)
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p1_2_3=cython.complex,
)
def elevate_quadratic(p0, p1, p2, _1_3=1 / 3, _2_3=2 / 3):
    """Given a quadratic bezier curve, return its degree-elevated cubic."""

    # https://pomax.github.io/bezierinfo/#reordering
    p1_2_3 = p1 * _2_3
    return (
        p0,
        (p0 * _1_3 + p1_2_3),
        (p2 * _1_3 + p1_2_3),
        p2,
    )
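
# Worked example of the elevation above: each cubic handle lies two thirds of
# the way from its end point towards the quadratic handle, so (approximately,
# up to floating-point rounding):
#
#     elevate_quadratic(0j, 3 + 6j, 6 + 0j)  # -> (0j, 2 + 4j, 4 + 4j, 6 + 0j)
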
@cython.locals(
    start=cython.int,
    n=cython.int,
    k=cython.int,
    prod_ratio=cython.double,
    sum_ratio=cython.double,
    ratio=cython.double,
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
def merge_curves(curves, start, n):
    """Given a cubic-Bezier spline, reconstruct one cubic-Bezier
    that has the same endpoints and tangents and approximates
    the spline."""

    # Reconstruct the t values of the cut segments
    prod_ratio = 1.0
    sum_ratio = 1.0
    ts = [1]
    for k in range(1, n):
        ck = curves[start + k]
        c_before = curves[start + k - 1]

        # |t_(k+1) - t_k| / |t_k - t_(k - 1)| = ratio
        assert ck[0] == c_before[3]
        ratio = abs(ck[1] - ck[0]) / abs(c_before[3] - c_before[2])

        prod_ratio *= ratio
        sum_ratio += prod_ratio
        ts.append(sum_ratio)

    # (t(n) - t(n - 1)) / (t_(1) - t(0)) = prod_ratio
    ts = [t / sum_ratio for t in ts[:-1]]

    p0 = curves[start][0]
    p1 = curves[start][1]
    p2 = curves[start + n - 1][2]
    p3 = curves[start + n - 1][3]

    # Build the curve by scaling the control-points.
    p1 = p0 + (p1 - p0) / (ts[0] if ts else 1)
    p2 = p3 + (p2 - p3) / ((1 - ts[-1]) if ts else 1)

    curve = (p0, p1, p2, p3)

    return curve, ts


@cython.locals(
    count=cython.int,
    num_offcurves=cython.int,
    i=cython.int,
    off1=cython.complex,
    off2=cython.complex,
    on=cython.complex,
)
def add_implicit_on_curves(p):
    q = list(p)
    count = 0
    num_offcurves = len(p) - 2
    for i in range(1, num_offcurves):
        off1 = p[i]
        off2 = p[i + 1]
        on = off1 + (off2 - off1) * 0.5
        q.insert(i + 1 + count, on)
        count += 1
    return q
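
# Small worked example: between two consecutive off-curves the implied on-curve
# point is their midpoint, so a spline [on, off, off, on] gains one point:
#
#     add_implicit_on_curves([0j, 4 + 0j, 8 + 8j, 12 + 8j])
#     # -> [0j, (4+0j), (6+4j), (8+8j), (12+8j)]
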
Point = Union[Tuple[float, float], complex]


@cython.locals(
    cost=cython.int,
    is_complex=cython.int,
)
def quadratic_to_curves(
    quads: List[List[Point]],
    max_err: float = 0.5,
    all_cubic: bool = False,
) -> List[Tuple[Point, ...]]:
    """Converts a connecting list of quadratic splines to a list of quadratic
    and cubic curves.

    A quadratic spline is specified as a list of points.  Either each point is
    a 2-tuple of X,Y coordinates, or each point is a complex number with
    real/imaginary components representing X,Y coordinates.

    The first and last points are on-curve points and the rest are off-curve
    points, with an implied on-curve point in the middle between every two
    consecutive off-curve points.

    Returns:
        The output is a list of tuples of points. Points are represented
        in the same format as the input, either as 2-tuples or complex numbers.

        Each tuple is either of length three, for a quadratic curve, or four,
        for a cubic curve.  Each curve's last point is the same as the next
        curve's first point.

    Args:
        quads: quadratic splines

        max_err: absolute error tolerance; defaults to 0.5

        all_cubic: if True, only cubic curves are generated; defaults to False
    """
    is_complex = type(quads[0][0]) is complex
    if not is_complex:
        quads = [[complex(x, y) for (x, y) in p] for p in quads]

    q = [quads[0][0]]
    costs = [1]
    cost = 1
    for p in quads:
        assert q[-1] == p[0]
        for i in range(len(p) - 2):
            cost += 1
            costs.append(cost)
            costs.append(cost)
        qq = add_implicit_on_curves(p)[1:]
        costs.pop()
        q.extend(qq)
        cost += 1
        costs.append(cost)

    curves = spline_to_curves(q, costs, max_err, all_cubic)

    if not is_complex:
        curves = [tuple((c.real, c.imag) for c in curve) for curve in curves]
    return curves
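
# Minimal usage sketch (points given as 2-tuples, one spline per input item):
#
#     >>> quadratic_to_curves([[(0, 0), (50, 100), (100, 0)]])
#     [((0.0, 0.0), (50.0, 100.0), (100.0, 0.0))]
#
# A lone quadratic is cheaper to keep than to merge into a cubic, so it comes
# back unchanged as a 3-point tuple; passing all_cubic=True would instead
# return its degree-elevated 4-point cubic.
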
Solution = namedtuple("Solution", ["num_points", "error", "start_index", "is_cubic"])


@cython.locals(
    i=cython.int,
    j=cython.int,
    k=cython.int,
    start=cython.int,
    i_sol_count=cython.int,
    j_sol_count=cython.int,
    this_sol_count=cython.int,
    tolerance=cython.double,
    err=cython.double,
    error=cython.double,
    i_sol_error=cython.double,
    j_sol_error=cython.double,
    all_cubic=cython.int,
    is_cubic=cython.int,
    count=cython.int,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    v=cython.complex,
    u=cython.complex,
)
def spline_to_curves(q, costs, tolerance=0.5, all_cubic=False):
    """
    q: quadratic spline with alternating on-curve / off-curve points.

    costs: cumulative list of encoding cost of q in terms of number of
      points that need to be encoded.  Implied on-curve points do not
      contribute to the cost. If all points need to be encoded, then
      costs will be range(1, len(q)+1).
    """

    assert len(q) >= 3, "quadratic spline requires at least 3 points"

    # Elevate quadratic segments to cubic
    elevated_quadratics = [
        elevate_quadratic(*q[i : i + 3]) for i in range(0, len(q) - 2, 2)
    ]

    # Find sharp corners; they have to be oncurves for sure.
    forced = set()
    for i in range(1, len(elevated_quadratics)):
        p0 = elevated_quadratics[i - 1][2]
        p1 = elevated_quadratics[i][0]
        p2 = elevated_quadratics[i][1]
        if abs(p1 - p0) + abs(p2 - p1) > tolerance + abs(p2 - p0):
            forced.add(i)

    # Dynamic programming to find the solution with the fewest cubic curves,
    # and within those the one with the smallest error.
    sols = [Solution(0, 0, 0, False)]
    impossible = Solution(len(elevated_quadratics) * 3 + 1, 0, 1, False)
    start = 0
    for i in range(1, len(elevated_quadratics) + 1):
        best_sol = impossible
        for j in range(start, i):
            j_sol_count, j_sol_error = sols[j].num_points, sols[j].error

            if not all_cubic:
                # Solution with quadratics between j:i
                this_count = costs[2 * i - 1] - costs[2 * j] + 1
                i_sol_count = j_sol_count + this_count
                i_sol_error = j_sol_error
                i_sol = Solution(i_sol_count, i_sol_error, i - j, False)
                if i_sol < best_sol:
                    best_sol = i_sol

                if this_count <= 3:
                    # Can't get any better than this in the path below
                    continue

            # Fit elevated_quadratics[j:i] into one cubic
            try:
                curve, ts = merge_curves(elevated_quadratics, j, i - j)
            except ZeroDivisionError:
                continue

            # Now reconstruct the segments from the fitted curve
            reconstructed_iter = splitCubicAtTC(*curve, *ts)
            reconstructed = []

            # Knot errors
            error = 0
            for k, reconst in enumerate(reconstructed_iter):
                orig = elevated_quadratics[j + k]
                err = abs(reconst[3] - orig[3])
                error = max(error, err)
                if error > tolerance:
                    break
                reconstructed.append(reconst)
            if error > tolerance:
                # Not feasible
                continue

            # Interior errors
            for k, reconst in enumerate(reconstructed):
                orig = elevated_quadratics[j + k]
                p0, p1, p2, p3 = tuple(v - u for v, u in zip(reconst, orig))

                if not cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
                    error = tolerance + 1
                    break
            if error > tolerance:
                # Not feasible
                continue

            # Save best solution
            i_sol_count = j_sol_count + 3
            i_sol_error = max(j_sol_error, error)
            i_sol = Solution(i_sol_count, i_sol_error, i - j, True)
            if i_sol < best_sol:
                best_sol = i_sol

            if i_sol_count == 3:
                # Can't get any better than this
                break

        sols.append(best_sol)
        if i in forced:
            start = i

    # Reconstruct solution
    splits = []
    cubic = []
    i = len(sols) - 1
    while i:
        count, is_cubic = sols[i].start_index, sols[i].is_cubic
        splits.append(i)
        cubic.append(is_cubic)
        i -= count
    curves = []
    j = 0
    for i, is_cubic in reversed(list(zip(splits, cubic))):
        if is_cubic:
            curves.append(merge_curves(elevated_quadratics, j, i - j)[0])
        else:
            for k in range(j, i):
                curves.append(q[k * 2 : k * 2 + 3])
        j = i

    return curves


def main():
    from fontTools.cu2qu.benchmark import generate_curve
    from fontTools.cu2qu import curve_to_quadratic

    tolerance = 0.05
    reconstruct_tolerance = tolerance * 1
    curve = generate_curve()
    quadratics = curve_to_quadratic(curve, tolerance)
    print(
        "cu2qu tolerance %g. qu2cu tolerance %g." % (tolerance, reconstruct_tolerance)
    )
    print("One random cubic turned into %d quadratics." % len(quadratics))
    curves = quadratic_to_curves([quadratics], reconstruct_tolerance)
    print("Those quadratics turned back into %d cubics. " % len(curves))
    print("Original curve:", curve)
    print("Reconstructed curve(s):", curves)


if __name__ == "__main__":
    main()
Some files were not shown because too many files have changed in this diff.