2013-07-23 11:17:35 -04:00
|
|
|
# Copyright 2013 Google, Inc. All Rights Reserved.
|
|
|
|
#
|
|
|
|
# Google Author(s): Behdad Esfahbod
|
2013-08-13 20:02:59 -04:00
|
|
|
|
2014-01-14 15:07:50 +08:00
|
|
|
from __future__ import print_function, division, absolute_import
|
2013-11-27 17:27:35 -05:00
|
|
|
from fontTools.misc.py23 import *
|
2013-09-19 20:36:49 -04:00
|
|
|
from fontTools import ttLib
|
|
|
|
from fontTools.ttLib.tables import otTables
|
|
|
|
from fontTools.misc import psCharStrings
|
2017-01-14 14:45:35 +00:00
|
|
|
from fontTools.pens.basePen import NullPen
|
2016-01-24 16:13:31 +00:00
|
|
|
from fontTools.misc.loggingTools import Timer
|
2013-11-27 17:27:35 -05:00
|
|
|
import sys
|
|
|
|
import struct
|
|
|
|
import array
|
2016-01-24 16:13:31 +00:00
|
|
|
import logging
|
|
|
|
from types import MethodType
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2014-08-13 18:06:31 -04:00
|
|
|
__usage__ = "pyftsubset font-file [glyph...] [--option=value]..."
|
2014-08-13 16:06:49 -04:00
|
|
|
|
|
|
|
__doc__="""\
|
|
|
|
pyftsubset -- OpenType font subsetter and optimizer
|
|
|
|
|
|
|
|
pyftsubset is an OpenType font subsetter and optimizer, based on fontTools.
|
|
|
|
It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff)
|
|
|
|
font file. The subsetted glyph set is based on the specified glyphs
|
|
|
|
or characters, and specified OpenType layout features.
|
|
|
|
|
|
|
|
The tool also performs some size-reducing optimizations, aimed for using
|
|
|
|
subset fonts as webfonts. Individual optimizations can be enabled or
|
|
|
|
disabled, and are enabled by default when they are safe.
|
|
|
|
|
|
|
|
Usage:
|
|
|
|
"""+__usage__+"""
|
|
|
|
|
2014-08-14 11:37:35 -04:00
|
|
|
At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
|
|
|
|
--text, --text-file, --unicodes, or --unicodes-file, must be specified.
|
2014-08-13 16:06:49 -04:00
|
|
|
|
|
|
|
Arguments:
|
|
|
|
font-file
|
|
|
|
The input font file.
|
|
|
|
glyph
|
2014-08-13 21:30:25 -04:00
|
|
|
Specify one or more glyph identifiers to include in the subset. Must be
|
2014-09-22 23:32:23 +02:00
|
|
|
PS glyph names, or the special string '*' to keep the entire glyph set.
|
2014-08-13 16:06:49 -04:00
|
|
|
|
2014-08-13 21:30:25 -04:00
|
|
|
Initial glyph set specification:
|
2014-08-13 19:30:03 -04:00
|
|
|
These options populate the initial glyph set. Same option can appear
|
|
|
|
    multiple times, and the results are accumulated.
|
2014-08-14 11:37:35 -04:00
|
|
|
--gids=<NNN>[,<NNN>...]
|
|
|
|
Specify comma/whitespace-separated list of glyph IDs or ranges as
|
|
|
|
decimal numbers. For example, --gids=10-12,14 adds glyphs with
|
|
|
|
numbers 10, 11, 12, and 14.
|
|
|
|
--gids-file=<path>
|
|
|
|
Like --gids but reads from a file. Anything after a '#' on any line
|
|
|
|
is ignored as comments.
|
2014-08-13 19:30:03 -04:00
|
|
|
--glyphs=<glyphname>[,<glyphname>...]
|
|
|
|
Specify comma/whitespace-separated PS glyph names to add to the subset.
|
|
|
|
Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc
|
2014-09-22 23:32:23 +02:00
|
|
|
      that are accepted on the command line.  The special string '*' will keep
|
|
|
|
the entire glyph set.
|
2014-08-13 19:30:03 -04:00
|
|
|
--glyphs-file=<path>
|
|
|
|
Like --glyphs but reads from a file. Anything after a '#' on any line
|
|
|
|
is ignored as comments.
|
2014-08-13 16:06:49 -04:00
|
|
|
--text=<text>
|
|
|
|
Specify characters to include in the subset, as UTF-8 string.
|
|
|
|
--text-file=<path>
|
2014-08-13 19:30:03 -04:00
|
|
|
Like --text but reads from a file. Newline character are not added to
|
|
|
|
the subset.
|
|
|
|
--unicodes=<XXXX>[,<XXXX>...]
|
2014-08-13 20:02:08 -04:00
|
|
|
Specify comma/whitespace-separated list of Unicode codepoints or
|
|
|
|
ranges as hex numbers, optionally prefixed with 'U+', 'u', etc.
|
|
|
|
For example, --unicodes=41-5a,61-7a adds ASCII letters, so does
|
|
|
|
the more verbose --unicodes=U+0041-005A,U+0061-007A.
|
2014-09-22 23:32:23 +02:00
|
|
|
      The special string '*' will choose all Unicode characters mapped
|
|
|
|
by the font.
|
2014-08-13 19:30:03 -04:00
|
|
|
--unicodes-file=<path>
|
|
|
|
Like --unicodes, but reads from a file. Anything after a '#' on any
|
|
|
|
line in the file is ignored as comments.
|
2014-08-13 21:17:24 -04:00
|
|
|
--ignore-missing-glyphs
|
|
|
|
Do not fail if some requested glyphs or gids are not available in
|
|
|
|
the font.
|
|
|
|
--no-ignore-missing-glyphs
|
|
|
|
Stop and fail if some requested glyphs or gids are not available
|
|
|
|
in the font. [default]
|
|
|
|
--ignore-missing-unicodes [default]
|
|
|
|
Do not fail if some requested Unicode characters (including those
|
|
|
|
indirectly specified using --text or --text-file) are not available
|
|
|
|
in the font.
|
|
|
|
--no-ignore-missing-unicodes
|
|
|
|
Stop and fail if some requested Unicode characters are not available
|
|
|
|
in the font.
|
|
|
|
Note the default discrepancy between ignoring missing glyphs versus
|
|
|
|
unicodes. This is for historical reasons and in the future
|
|
|
|
--no-ignore-missing-unicodes might become default.
|
2014-08-13 18:10:42 -04:00
|
|
|
|
2014-08-13 21:30:25 -04:00
|
|
|
Other options:
|
|
|
|
For the other options listed below, to see the current value of the option,
|
|
|
|
pass a value of '?' to it, with or without a '='.
|
|
|
|
Examples:
|
|
|
|
$ pyftsubset --glyph-names?
|
|
|
|
Current setting for 'glyph-names' is: False
|
|
|
|
$ ./pyftsubset --name-IDs=?
|
|
|
|
Current setting for 'name-IDs' is: [1, 2]
|
|
|
|
$ ./pyftsubset --hinting? --no-hinting --hinting?
|
|
|
|
Current setting for 'hinting' is: True
|
|
|
|
Current setting for 'hinting' is: False
|
|
|
|
|
|
|
|
Output options:
|
|
|
|
--output-file=<path>
|
|
|
|
The output font file. If not specified, the subsetted font
|
|
|
|
will be saved in as font-file.subset.
|
|
|
|
--flavor=<type>
|
2015-07-31 19:56:13 +01:00
|
|
|
Specify flavor of output font file. May be 'woff' or 'woff2'.
|
|
|
|
Note that WOFF2 requires the Brotli Python extension, available
|
|
|
|
at https://github.com/google/brotli
|
2016-01-31 14:02:10 +00:00
|
|
|
--with-zopfli
|
|
|
|
Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 %
|
|
|
|
smaller than pure zlib, but the compression speed is much slower.
|
|
|
|
The Zopfli Python bindings are available at:
|
2017-03-01 14:01:15 +00:00
|
|
|
https://pypi.python.org/pypi/zopfli
|
2014-08-13 21:30:25 -04:00
|
|
|
|
2014-08-13 18:10:42 -04:00
|
|
|
Glyph set expansion:
|
|
|
|
These options control how additional glyphs are added to the subset.
|
2014-08-13 16:06:49 -04:00
|
|
|
--notdef-glyph
|
|
|
|
Add the '.notdef' glyph to the subset (ie, keep it). [default]
|
|
|
|
--no-notdef-glyph
|
|
|
|
Drop the '.notdef' glyph unless specified in the glyph set. This
|
|
|
|
saves a few bytes, but is not possible for Postscript-flavored
|
|
|
|
fonts, as those require '.notdef'. For TrueType-flavored fonts,
|
|
|
|
this works fine as long as no unsupported glyphs are requested
|
|
|
|
from the font.
|
|
|
|
--notdef-outline
|
|
|
|
Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is
|
|
|
|
used when glyphs not supported by the font are to be shown. It is not
|
|
|
|
needed otherwise.
|
|
|
|
--no-notdef-outline
|
|
|
|
When including a '.notdef' glyph, remove its outline. This saves
|
|
|
|
a few bytes. [default]
|
|
|
|
--recommended-glyphs
|
|
|
|
Add glyphs 0, 1, 2, and 3 to the subset, as recommended for
|
|
|
|
TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'.
|
|
|
|
Some legacy software might require this, but no modern system does.
|
|
|
|
--no-recommended-glyphs
|
|
|
|
Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in
|
|
|
|
glyph set. [default]
|
|
|
|
--layout-features[+|-]=<feature>[,<feature>...]
|
|
|
|
Specify (=), add to (+=) or exclude from (-=) the comma-separated
|
|
|
|
set of OpenType layout feature tags that will be preserved.
|
|
|
|
Glyph variants used by the preserved features are added to the
|
|
|
|
specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs',
|
2017-08-07 21:48:11 -07:00
|
|
|
'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt',
|
|
|
|
'rlig', 'rvrn', and all features required for script shaping are
|
|
|
|
preserved. To see the full list, try '--layout-features=?'.
|
|
|
|
Use '*' to keep all features.
|
2014-08-13 16:06:49 -04:00
|
|
|
Multiple --layout-features options can be provided if necessary.
|
|
|
|
Examples:
|
|
|
|
--layout-features+=onum,pnum,ss01
|
|
|
|
* Keep the default set of features and 'onum', 'pnum', 'ss01'.
|
|
|
|
--layout-features-='mark','mkmk'
|
|
|
|
* Keep the default set of features but drop 'mark' and 'mkmk'.
|
|
|
|
--layout-features='kern'
|
|
|
|
* Only keep the 'kern' feature, drop all others.
|
|
|
|
--layout-features=''
|
|
|
|
* Drop all features.
|
|
|
|
--layout-features='*'
|
|
|
|
* Keep all features.
|
|
|
|
--layout-features+=aalt --layout-features-=vrt2
|
|
|
|
* Keep default set of features plus 'aalt', but drop 'vrt2'.
|
|
|
|
|
2014-08-13 19:56:11 -04:00
|
|
|
Hinting options:
|
|
|
|
--hinting
|
|
|
|
Keep hinting [default]
|
|
|
|
--no-hinting
|
|
|
|
Drop glyph-specific hinting and font-wide hinting tables, as well
|
|
|
|
as remove hinting-related bits and pieces from other tables (eg. GPOS).
|
|
|
|
See --hinting-tables for list of tables that are dropped by default.
|
|
|
|
Instructions and hints are stripped from 'glyf' and 'CFF ' tables
|
|
|
|
respectively. This produces (sometimes up to 30%) smaller fonts that
|
|
|
|
are suitable for extremely high-resolution systems, like high-end
|
|
|
|
mobile devices and retina displays.
|
2014-06-20 15:30:26 -07:00
|
|
|
|
|
|
|
Optimization options:
|
|
|
|
--desubroutinize
|
|
|
|
Remove CFF use of subroutinizes. Subroutinization is a way to make CFF
|
|
|
|
fonts smaller. For small subsets however, desubroutinizing might make
|
2015-04-22 16:02:19 -07:00
|
|
|
the font smaller. It has even been reported that desubroutinized CFF
|
|
|
|
fonts compress better (produce smaller output) WOFF and WOFF2 fonts.
|
|
|
|
Also see note under --no-hinting.
|
2014-06-20 15:30:26 -07:00
|
|
|
--no-desubroutinize [default]
|
|
|
|
Leave CFF subroutinizes as is, only throw away unused subroutinizes.
|
2014-08-13 19:56:11 -04:00
|
|
|
|
2014-08-13 16:06:49 -04:00
|
|
|
Font table options:
|
|
|
|
--drop-tables[+|-]=<table>[,<table>...]
|
|
|
|
Specify (=), add to (+=) or exclude from (-=) the comma-separated
|
|
|
|
      set of tables that will be dropped.
|
|
|
|
By default, the following tables are dropped:
|
|
|
|
'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH'
|
|
|
|
and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill'
|
2015-11-07 14:09:00 +04:00
|
|
|
and color tables: 'CBLC', 'CBDT', 'sbix'.
|
2014-08-13 16:06:49 -04:00
|
|
|
The tool will attempt to subset the remaining tables.
|
|
|
|
Examples:
|
|
|
|
--drop-tables-='SVG '
|
|
|
|
* Drop the default set of tables but keep 'SVG '.
|
|
|
|
--drop-tables+=GSUB
|
|
|
|
* Drop the default set of tables and 'GSUB'.
|
|
|
|
--drop-tables=DSIG
|
|
|
|
* Only drop the 'DSIG' table, keep all others.
|
|
|
|
--drop-tables=
|
|
|
|
* Keep all tables.
|
|
|
|
--no-subset-tables+=<table>[,<table>...]
|
|
|
|
Add to the set of tables that will not be subsetted.
|
|
|
|
By default, the following tables are included in this list, as
|
|
|
|
they do not need subsetting (ignore the fact that 'loca' is listed
|
|
|
|
here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca',
|
2017-04-14 15:27:52 -07:00
|
|
|
'name', 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'STAT'.
|
2016-09-26 19:13:04 +01:00
|
|
|
By default, tables that the tool does not know how to subset and are not
|
|
|
|
specified here will be dropped from the font, unless --passthrough-tables
|
|
|
|
option is passed.
|
2014-08-13 16:06:49 -04:00
|
|
|
Example:
|
|
|
|
--no-subset-tables+=FFTM
|
|
|
|
* Keep 'FFTM' table in the font by preventing subsetting.
|
2016-09-26 19:13:04 +01:00
|
|
|
--passthrough-tables
|
|
|
|
Do not drop tables that the tool does not know how to subset.
|
|
|
|
--no-passthrough-tables
|
|
|
|
Tables that the tool does not know how to subset and are not specified
|
|
|
|
in --no-subset-tables will be dropped from the font. [default]
|
2014-08-13 16:06:49 -04:00
|
|
|
--hinting-tables[-]=<table>[,<table>...]
|
2014-08-13 19:56:11 -04:00
|
|
|
Specify (=), add to (+=) or exclude from (-=) the list of font-wide
|
|
|
|
hinting tables that will be dropped if --no-hinting is specified,
|
2014-08-13 16:06:49 -04:00
|
|
|
Examples:
|
|
|
|
--hinting-tables-='VDMX'
|
|
|
|
* Drop font-wide hinting tables except 'VDMX'.
|
|
|
|
--hinting-tables=''
|
|
|
|
* Keep all font-wide hinting tables (but strip hints from glyphs).
|
2014-09-25 16:18:58 +01:00
|
|
|
--legacy-kern
|
|
|
|
Keep TrueType 'kern' table even when OpenType 'GPOS' is available.
|
|
|
|
--no-legacy-kern
|
|
|
|
Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default]
|
2014-08-13 16:06:49 -04:00
|
|
|
|
|
|
|
Font naming options:
|
2014-08-13 19:30:03 -04:00
|
|
|
These options control what is retained in the 'name' table. For numerical
|
2014-08-13 16:06:49 -04:00
|
|
|
codes, see: http://www.microsoft.com/typography/otspec/name.htm
|
|
|
|
--name-IDs[+|-]=<nameID>[,<nameID>...]
|
|
|
|
Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
|
|
|
|
entry nameIDs that will be preserved. By default only nameID 1 (Family)
|
|
|
|
and nameID 2 (Style) are preserved. Use '*' to keep all entries.
|
|
|
|
Examples:
|
|
|
|
--name-IDs+=0,4,6
|
|
|
|
* Also keep Copyright, Full name and PostScript name entry.
|
|
|
|
--name-IDs=''
|
|
|
|
* Drop all 'name' table entries.
|
|
|
|
--name-IDs='*'
|
|
|
|
* keep all 'name' table entries
|
|
|
|
--name-legacy
|
|
|
|
Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.).
|
|
|
|
XXX Note: This might be needed for some fonts that have no Unicode name
|
|
|
|
      entries for English.  See: https://github.com/behdad/fonttools/issues/146
|
|
|
|
--no-name-legacy
|
|
|
|
Drop legacy (non-Unicode) 'name' table entries [default]
|
|
|
|
--name-languages[+|-]=<langID>[,<langID>]
|
|
|
|
Specify (=), add to (+=) or exclude from (-=) the set of 'name' table
|
|
|
|
langIDs that will be preserved. By default only records with langID
|
|
|
|
0x0409 (English) are preserved. Use '*' to keep all langIDs.
|
2014-09-25 15:42:13 +01:00
|
|
|
--obfuscate-names
|
|
|
|
Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4,
|
|
|
|
and 6 with dummy strings (it is still fully functional as webfont).
|
2014-08-13 16:06:49 -04:00
|
|
|
|
|
|
|
Glyph naming and encoding options:
|
|
|
|
--glyph-names
|
|
|
|
Keep PS glyph names in TT-flavored fonts. In general glyph names are
|
|
|
|
not needed for correct use of the font. However, some PDF generators
|
|
|
|
and PDF viewers might rely on glyph names to extract Unicode text
|
|
|
|
from PDF documents.
|
|
|
|
--no-glyph-names
|
|
|
|
Drop PS glyph names in TT-flavored fonts, by using 'post' table
|
|
|
|
version 3.0. [default]
|
|
|
|
--legacy-cmap
|
|
|
|
Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.).
|
|
|
|
--no-legacy-cmap
|
|
|
|
Drop the legacy 'cmap' subtables. [default]
|
|
|
|
--symbol-cmap
|
|
|
|
Keep the 3.0 symbol 'cmap'.
|
|
|
|
--no-symbol-cmap
|
|
|
|
Drop the 3.0 symbol 'cmap'. [default]
|
|
|
|
|
|
|
|
Other font-specific options:
|
|
|
|
--recalc-bounds
|
|
|
|
Recalculate font bounding boxes.
|
|
|
|
--no-recalc-bounds
|
|
|
|
Keep original font bounding boxes. This is faster and still safe
|
|
|
|
for all practical purposes. [default]
|
|
|
|
--recalc-timestamp
|
|
|
|
Set font 'modified' timestamp to current time.
|
|
|
|
--no-recalc-timestamp
|
|
|
|
Do not modify font 'modified' timestamp. [default]
|
|
|
|
--canonical-order
|
|
|
|
Order tables as recommended in the OpenType standard. This is not
|
|
|
|
required by the standard, nor by any known implementation.
|
|
|
|
--no-canonical-order
|
|
|
|
Keep original order of font tables. This is faster. [default]
|
2016-01-27 16:43:55 +00:00
|
|
|
--prune-unicode-ranges
|
|
|
|
Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode
|
|
|
|
ranges defined in the OpenType specification v1.7 are intersected with
|
|
|
|
the Unicode codepoints specified in the font's Unicode 'cmap' subtables:
|
|
|
|
when no overlap is found, the bit will be switched off. However, it will
|
|
|
|
*not* be switched on if an intersection is found. [default]
|
|
|
|
--no-prune-unicode-ranges
|
|
|
|
Don't change the 'OS/2 ulUnicodeRange*' bits.
|
2016-07-19 22:54:05 +01:00
|
|
|
--recalc-average-width
|
2016-07-20 09:33:18 +01:00
|
|
|
Update the 'OS/2 xAvgCharWidth' field after subsetting.
|
2016-07-19 22:54:05 +01:00
|
|
|
--no-recalc-average-width
|
2016-07-20 09:33:18 +01:00
|
|
|
Don't change the 'OS/2 xAvgCharWidth' field. [default]
|
2014-08-13 16:06:49 -04:00
|
|
|
|
|
|
|
Application options:
|
|
|
|
--verbose
|
|
|
|
Display verbose information of the subsetting process.
|
|
|
|
--timing
|
|
|
|
Display detailed timing information of the subsetting process.
|
|
|
|
--xml
|
|
|
|
Display the TTX XML representation of subsetted font.
|
|
|
|
|
|
|
|
Example:
|
|
|
|
Produce a subset containing the characters ' !"#$%' without performing
|
|
|
|
size-reducing optimizations:
|
|
|
|
|
2014-10-15 14:15:10 -07:00
|
|
|
$ pyftsubset font.ttf --unicodes="U+0020-0025" \\
|
2014-08-13 16:06:49 -04:00
|
|
|
--layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\
|
|
|
|
--notdef-glyph --notdef-outline --recommended-glyphs \\
|
|
|
|
--name-IDs='*' --name-legacy --name-languages='*'
|
|
|
|
"""
|
|
|
|
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2017-01-11 11:58:17 +00:00
|
|
|
log = logging.getLogger("fontTools.subset")
|
2016-01-24 16:13:31 +00:00
|
|
|
|
|
|
|
def _log_glyphs(self, glyphs, font=None):
|
|
|
|
self.info("Glyph names: %s", sorted(glyphs))
|
|
|
|
if font:
|
|
|
|
reverseGlyphMap = font.getReverseGlyphMap()
|
|
|
|
self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs))
|
|
|
|
|
|
|
|
# bind "glyphs" function to 'log' object
|
|
|
|
log.glyphs = MethodType(_log_glyphs, log)
|
|
|
|
|
|
|
|
# I use a different timing channel so I can configure it separately from the
|
|
|
|
# main module's logger
|
2017-01-11 11:58:17 +00:00
|
|
|
timer = Timer(logger=logging.getLogger("fontTools.subset.timer"))
|
2016-01-24 16:13:31 +00:00
|
|
|
|
|
|
|
|
2013-08-13 20:25:37 -04:00
|
|
|
def _add_method(*clazzes):
|
2015-05-07 10:40:29 +02:00
|
|
|
"""Returns a decorator function that adds a new method to one or
|
|
|
|
more classes."""
|
|
|
|
def wrapper(method):
|
2016-01-13 17:00:29 +00:00
|
|
|
done = []
|
2015-05-07 10:40:29 +02:00
|
|
|
for clazz in clazzes:
|
2016-01-13 17:00:29 +00:00
|
|
|
if clazz in done: continue # Support multiple names of a clazz
|
|
|
|
done.append(clazz)
|
2015-05-07 10:40:29 +02:00
|
|
|
assert clazz.__name__ != 'DefaultTable', \
|
|
|
|
'Oops, table class not found.'
|
|
|
|
assert not hasattr(clazz, method.__name__), \
|
|
|
|
"Oops, class '%s' has method '%s'." % (clazz.__name__,
|
|
|
|
method.__name__)
|
|
|
|
setattr(clazz, method.__name__, method)
|
|
|
|
return None
|
|
|
|
return wrapper
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-08-13 20:25:37 -04:00
|
|
|
def _uniq_sort(l):
|
2015-05-07 10:40:29 +02:00
|
|
|
return sorted(set(l))
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-08-16 16:16:22 -04:00
|
|
|
def _set_update(s, *others):
|
2015-05-07 10:40:29 +02:00
|
|
|
# Jython's set.update only takes one other argument.
|
|
|
|
# Emulate real set.update...
|
|
|
|
for other in others:
|
|
|
|
s.update(other)
|
2013-08-16 16:16:22 -04:00
|
|
|
|
2015-08-23 20:39:34 +01:00
|
|
|
def _dict_subset(d, glyphs):
|
2015-12-09 13:32:35 -08:00
|
|
|
return {g:d[g] for g in glyphs}
|
2015-08-23 20:39:34 +01:00
|
|
|
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Coverage)
def intersect(self, glyphs):
    """Return the ascending list of coverage indices whose glyph is in *glyphs*."""
    matches = []
    for index, glyph in enumerate(self.glyphs):
        if glyph in glyphs:
            matches.append(index)
    return matches
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Coverage)
def intersect_glyphs(self, glyphs):
    """Return the set of covered glyphs that also appear in *glyphs*."""
    return {g for g in self.glyphs if g in glyphs}
|
2013-08-12 19:24:24 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Coverage)
def subset(self, glyphs):
    """Restrict coverage to *glyphs*; return the ascending list of the
    surviving entries' indices in the pre-subset numbering."""
    kept = self.intersect(glyphs)
    self.glyphs = [self.glyphs[i] for i in kept]
    return kept
|
2013-07-21 23:15:32 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Coverage)
def remap(self, coverage_map):
    """Keep (and reorder) coverage entries per the indices in *coverage_map*."""
    remapped = []
    for index in coverage_map:
        remapped.append(self.glyphs[index])
    self.glyphs = remapped
|
2013-08-08 22:26:49 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ClassDef)
def intersect(self, glyphs):
    """Return the ascending list of class values matched by *glyphs*.

    Class 0 (the implicit "unclassified" class) is included whenever some
    glyph in *glyphs* has no explicit class assignment.
    """
    classes = {v for g, v in self.classDefs.items() if g in glyphs}
    if any(g not in self.classDefs for g in glyphs):
        classes.add(0)
    return sorted(classes)
|
2013-07-23 22:17:39 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ClassDef)
def intersect_class(self, glyphs, klass):
    """Return the subset of *glyphs* whose class value equals *klass*."""
    if klass == 0:
        # Class 0 is implicit: every glyph absent from classDefs.
        return {g for g in glyphs if g not in self.classDefs}
    return {g for g, v in self.classDefs.items()
            if v == klass and g in glyphs}
|
2013-07-23 22:17:39 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ClassDef)
def subset(self, glyphs, remap=False):
    """Restrict the class definition to *glyphs*; return the ascending list
    of remaining class values, optionally renumbering them densely."""
    survivors = {}
    for g, v in self.classDefs.items():
        if g in glyphs:
            survivors[g] = v
    self.classDefs = survivors
    # Class 0 has the special meaning of "not matched", but if every glyph
    # matches an explicit class, class 0 can be optimized out as well.
    classes = set(self.classDefs.values())
    if any(g not in self.classDefs for g in glyphs):
        classes.add(0)
    indices = sorted(classes)
    if remap:
        self.remap(indices)
    return indices
|
2013-07-22 12:15:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ClassDef)
def remap(self, class_map):
    """Renumber each class value to its position within *class_map*."""
    renumbered = {}
    for g, v in self.classDefs.items():
        renumbered[g] = class_map.index(v)
    self.classDefs = renumbered
|
2013-07-21 23:15:32 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add the substitute of every glyph in *cur_glyphs* to the subset's glyph set."""
    for src, dst in self.mapping.items():
        if src in cur_glyphs:
            s.glyphs.add(dst)
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst)
def subset_glyphs(self, s):
    """Drop mappings whose input or output glyph is outside the subset;
    return True if any mappings remain."""
    keep = s.glyphs
    pruned = {}
    for src, dst in self.mapping.items():
        if src in keep and dst in keep:
            pruned[src] = dst
    self.mapping = pruned
    return bool(pruned)
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add the full output sequence of every glyph in *cur_glyphs* to the
    subset's glyph set."""
    for src, sequence in self.mapping.items():
        if src not in cur_glyphs:
            continue
        s.glyphs.update(sequence)
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MultipleSubst)
def subset_glyphs(self, s):
    """Keep only mappings whose input glyph and every output glyph survive;
    return True if any mappings remain."""
    keep = s.glyphs
    pruned = {}
    for src, sequence in self.mapping.items():
        if src in keep and all(out in keep for out in sequence):
            pruned[src] = sequence
    self.mapping = pruned
    return bool(pruned)
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add every alternate of the glyphs in *cur_glyphs* to the subset's glyph set."""
    for base, alternates in self.alternates.items():
        if base in cur_glyphs:
            s.glyphs.update(alternates)
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.AlternateSubst)
def subset_glyphs(self, s):
    """Keep alternate sets whose base glyph and every alternate survive;
    return True if any sets remain."""
    keep = s.glyphs
    pruned = {}
    for base, alternates in self.alternates.items():
        if base in keep and all(alt in keep for alt in alternates):
            pruned[base] = alternates
    self.alternates = pruned
    return bool(pruned)
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add ligature glyphs whose first component is in *cur_glyphs* and whose
    remaining components are already in the subset's glyph set."""
    for first, seqs in self.ligatures.items():
        if first not in cur_glyphs:
            continue
        for seq in seqs:
            if all(c in s.glyphs for c in seq.Component):
                s.glyphs.add(seq.LigGlyph)
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LigatureSubst)
def subset_glyphs(self, s):
    """Drop ligatures whose first glyph, ligature glyph, or any component is
    outside the subset; return True if any ligatures remain."""
    keep = s.glyphs
    pruned = {}
    for first, seqs in self.ligatures.items():
        if first not in keep:
            continue
        surviving = [seq for seq in seqs
                     if seq.LigGlyph in keep and
                     all(c in keep for c in seq.Component)]
        if surviving:
            pruned[first] = surviving
    self.ligatures = pruned
    return bool(pruned)
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add substitutes for covered glyphs in *cur_glyphs*, but only when every
    backtrack/lookahead coverage still intersects the subset's glyph set."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    indices = self.Coverage.intersect(cur_glyphs)
    if not indices:
        return
    context = self.LookAheadCoverage + self.BacktrackCoverage
    if not all(c.intersect(s.glyphs) for c in context):
        return
    s.glyphs.update(self.Substitute[i] for i in indices)
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
    """Subset coverage and substitutes; return True if rules remain and all
    context coverages still intersect the subset."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    kept = self.Coverage.subset(s.glyphs)
    self.Substitute = [self.Substitute[i] for i in kept]
    # Now drop rules generating glyphs we don't want.
    wanted = [i for i, sub in enumerate(self.Substitute) if sub in s.glyphs]
    self.Substitute = [self.Substitute[i] for i in wanted]
    self.Coverage.remap(wanted)
    self.GlyphCount = len(self.Substitute)
    context = self.LookAheadCoverage + self.BacktrackCoverage
    # Short-circuit: the context coverages are only subset when rules remain.
    return bool(self.GlyphCount and
                all(c.subset(s.glyphs) for c in context))
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SinglePos)
def subset_glyphs(self, s):
    """Subset single positioning to *s.glyphs*; a truthy return value means
    the subtable still positions something."""
    if self.Format == 1:
        # One shared value record; only the coverage shrinks.
        return len(self.Coverage.subset(s.glyphs))
    if self.Format == 2:
        kept = self.Coverage.subset(s.glyphs)
        old_values = self.Value
        limit = len(old_values)
        # Guard i < limit: some fonts carry fewer values than covered glyphs.
        self.Value = [old_values[i] for i in kept if i < limit]
        self.ValueCount = len(self.Value)
        return bool(self.ValueCount)
    assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SinglePos)
def prune_post_subset(self, options):
    """After subsetting, drop hinting data when hinting is disabled."""
    if not options.hinting:
        # Clear the device-table bits of the value format.
        self.ValueFormat &= ~0x00F0
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.PairPos)
def subset_glyphs(self, s):
    """Subset pair positioning to *s.glyphs*; return True if any pairs (format 1)
    or classes (format 2) remain."""
    if self.Format == 1:
        kept = self.Coverage.subset(s.glyphs)
        old_sets = self.PairSet
        limit = len(old_sets)
        # Guard i < limit: some fonts carry fewer pair sets than covered glyphs.
        self.PairSet = [old_sets[i] for i in kept if i < limit]
        for pairset in self.PairSet:
            pairset.PairValueRecord = [
                rec for rec in pairset.PairValueRecord
                if rec.SecondGlyph in s.glyphs]
            pairset.PairValueCount = len(pairset.PairValueRecord)
        # Remove pair sets that lost all their records.
        nonempty = [i for i, p in enumerate(self.PairSet) if p.PairValueCount]
        self.Coverage.remap(nonempty)
        self.PairSet = [self.PairSet[i] for i in nonempty]
        self.PairSetCount = len(self.PairSet)
        return bool(self.PairSetCount)
    if self.Format == 2:
        # Classes beyond the declared counts are clipped off.
        class1_map = [c for c in self.ClassDef1.subset(s.glyphs, remap=True)
                      if c < self.Class1Count]
        class2_map = [c for c in self.ClassDef2.subset(s.glyphs, remap=True)
                      if c < self.Class2Count]
        self.Class1Record = [self.Class1Record[i] for i in class1_map]
        for rec in self.Class1Record:
            rec.Class2Record = [rec.Class2Record[i] for i in class2_map]
        self.Class1Count = len(class1_map)
        self.Class2Count = len(class2_map)
        return bool(self.Class1Count and
                    self.Class2Count and
                    self.Coverage.subset(s.glyphs))
    assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.PairPos)
def prune_post_subset(self, options):
    """After subsetting, drop hinting data when hinting is disabled."""
    if not options.hinting:
        # Clear the device-table bits of both value formats.
        self.ValueFormat1 &= ~0x00F0
        self.ValueFormat2 &= ~0x00F0
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.CursivePos)
def subset_glyphs(self, s):
    """Subset cursive attachment records to *s.glyphs*; return True if any remain."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    kept = self.Coverage.subset(s.glyphs)
    old_records = self.EntryExitRecord
    limit = len(old_records)
    # Guard i < limit: some fonts carry fewer records than covered glyphs.
    self.EntryExitRecord = [old_records[i] for i in kept if i < limit]
    self.EntryExitCount = len(self.EntryExitRecord)
    return bool(self.EntryExitCount)
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Anchor)
def prune_hints(self):
    """Downgrade the anchor to format 1, discarding device tables and the
    contour anchor point."""
    self.ensureDecompiled()
    self.Format = 1
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.CursivePos)
def prune_post_subset(self, options):
    """After subsetting, strip hints from entry/exit anchors when hinting is
    disabled."""
    if not options.hinting:
        for record in self.EntryExitRecord:
            if record.EntryAnchor:
                record.EntryAnchor.prune_hints()
            if record.ExitAnchor:
                record.ExitAnchor.prune_hints()
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkBasePos)
def subset_glyphs(self, s):
    """Drop marks/bases outside the subset and renumber mark classes."""
    if self.Format == 1:
        marks = self.MarkArray
        bases = self.BaseArray
        kept_marks = self.MarkCoverage.subset(s.glyphs)
        marks.MarkRecord = [marks.MarkRecord[i] for i in kept_marks]
        marks.MarkCount = len(marks.MarkRecord)
        kept_bases = self.BaseCoverage.subset(s.glyphs)
        bases.BaseRecord = [bases.BaseRecord[i] for i in kept_bases]
        bases.BaseCount = len(bases.BaseRecord)
        # Prune empty classes: only classes still used by a surviving mark
        # are kept; renumber them densely and drop the matching base anchors.
        live_classes = _uniq_sort(rec.Class for rec in marks.MarkRecord)
        self.ClassCount = len(live_classes)
        for rec in marks.MarkRecord:
            rec.Class = live_classes.index(rec.Class)
        for rec in bases.BaseRecord:
            rec.BaseAnchor = [rec.BaseAnchor[i] for i in live_classes]
        return bool(self.ClassCount and
                    marks.MarkCount and
                    bases.BaseCount)
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkBasePos)
def prune_post_subset(self, options):
    """Strip device data from mark and base anchors when hinting is dropped."""
    if not options.hinting:
        for rec in self.MarkArray.MarkRecord:
            anchor = rec.MarkAnchor
            if anchor:
                anchor.prune_hints()
        for rec in self.BaseArray.BaseRecord:
            for anchor in rec.BaseAnchor:
                if anchor:
                    anchor.prune_hints()
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkLigPos)
def subset_glyphs(self, s):
    """Drop marks/ligatures outside the subset and renumber mark classes."""
    if self.Format == 1:
        marks = self.MarkArray
        ligs = self.LigatureArray
        kept_marks = self.MarkCoverage.subset(s.glyphs)
        marks.MarkRecord = [marks.MarkRecord[i] for i in kept_marks]
        marks.MarkCount = len(marks.MarkRecord)
        kept_ligs = self.LigatureCoverage.subset(s.glyphs)
        ligs.LigatureAttach = [ligs.LigatureAttach[i] for i in kept_ligs]
        ligs.LigatureCount = len(ligs.LigatureAttach)
        # Prune empty classes: renumber surviving mark classes densely and
        # drop the matching ligature-component anchors.
        live_classes = _uniq_sort(rec.Class for rec in marks.MarkRecord)
        self.ClassCount = len(live_classes)
        for rec in marks.MarkRecord:
            rec.Class = live_classes.index(rec.Class)
        for attach in ligs.LigatureAttach:
            for comp in attach.ComponentRecord:
                comp.LigatureAnchor = [comp.LigatureAnchor[i] for i in live_classes]
        return bool(self.ClassCount and
                    marks.MarkCount and
                    ligs.LigatureCount)
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkLigPos)
def prune_post_subset(self, options):
    """Strip device data from mark and ligature anchors when hinting is dropped."""
    if not options.hinting:
        for rec in self.MarkArray.MarkRecord:
            anchor = rec.MarkAnchor
            if anchor:
                anchor.prune_hints()
        for attach in self.LigatureArray.LigatureAttach:
            for comp in attach.ComponentRecord:
                for anchor in comp.LigatureAnchor:
                    if anchor:
                        anchor.prune_hints()
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkMarkPos)
def subset_glyphs(self, s):
    """Drop mark1/mark2 glyphs outside the subset and renumber mark classes."""
    if self.Format == 1:
        marks1 = self.Mark1Array
        marks2 = self.Mark2Array
        kept1 = self.Mark1Coverage.subset(s.glyphs)
        marks1.MarkRecord = [marks1.MarkRecord[i] for i in kept1]
        marks1.MarkCount = len(marks1.MarkRecord)
        kept2 = self.Mark2Coverage.subset(s.glyphs)
        marks2.Mark2Record = [marks2.Mark2Record[i] for i in kept2]
        marks2.MarkCount = len(marks2.Mark2Record)
        # Prune empty classes: renumber surviving mark1 classes densely and
        # drop the matching mark2 anchors.
        live_classes = _uniq_sort(rec.Class for rec in marks1.MarkRecord)
        self.ClassCount = len(live_classes)
        for rec in marks1.MarkRecord:
            rec.Class = live_classes.index(rec.Class)
        for rec in marks2.Mark2Record:
            rec.Mark2Anchor = [rec.Mark2Anchor[i] for i in live_classes]
        return bool(self.ClassCount and
                    marks1.MarkCount and
                    marks2.MarkCount)
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, options):
    """Drop device tables / contour anchor points when hinting is dropped."""
    if not options.hinting:
        for rec in self.Mark1Array.MarkRecord:
            anchor = rec.MarkAnchor
            if anchor:
                anchor.prune_hints()
        for rec in self.Mark2Array.Mark2Record:
            for anchor in rec.Mark2Anchor:
                if anchor:
                    anchor.prune_hints()
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
    """Leaf subtables reference no other lookups; nothing to remap."""
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def collect_lookups(self):
    """Leaf subtables never reference other lookups."""
    return []
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def prune_post_subset(self, options):
    """These subtables carry no prunable data; always report them kept."""
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.SingleSubst,
             otTables.AlternateSubst,
             otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
    """Single-glyph substitutions always map one glyph to one glyph."""
    return False
|
2013-08-12 20:24:33 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.MultipleSubst,
             otTables.LigatureSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst)
def may_have_non_1to1(self):
    """These subtables can map one glyph to many (or many to one)."""
    return True
|
2013-08-12 20:24:33 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def __subset_classify_context(self):
    # Build (and cache, per class and format) a ContextHelper that abstracts
    # over the naming/structure differences between the GSUB/GPOS contextual
    # subtable flavors (Context vs ChainContext, Subst vs Pos, formats 1-3),
    # so the generic subset/closure code below can treat them uniformly.

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive naming prefixes from the concrete otTables class name.
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
                # In chained data triples (backtrack, input, lookahead),
                # the input sequence sits at index 1.
                InputIdx = 1
                DataLen = 3
            else:
                Chain = ''
                InputIdx = 0
                DataLen = 1
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp
            self.InputIdx = InputIdx
            self.DataLen = DataLen

            # Attribute name of the per-rule lookup-record list.
            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                # Format 1: rules keyed by first glyph; context is glyph lists.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(None,)
                ChainContextData = lambda r:(None, None, None)
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r:(r.Input,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Input,) = d
                    # GlyphCount counts the first (coverage) glyph too.
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 2:
                # Format 2: rules keyed by glyph class; context is ClassDefs.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(r.ClassDef,)
                ChainContextData = lambda r:(r.BacktrackClassDef,
                                             r.InputClassDef,
                                             r.LookAheadClassDef)
                def SetContextData(r, d):
                    (r.ClassDef,) = d
                def SetChainContextData(r, d):
                    (r.BacktrackClassDef,
                     r.InputClassDef,
                     r.LookAheadClassDef) = d
                RuleData = lambda r:(r.Class,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Class,) = d
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 3:
                # Format 3: a single rule expressed as per-position coverages.
                Coverage = lambda r: r.Coverage[0]
                ChainCoverage = lambda r: r.InputCoverage[0]
                ContextData = None
                ChainContextData = None
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r: r.Coverage
                ChainRuleData = lambda r:(r.BacktrackCoverage +
                                          r.InputCoverage +
                                          r.LookAheadCoverage)
                def SetRuleData(r, d):
                    (r.Coverage,) = d
                    # No +1 here: format 3 coverage lists include position 0.
                    (r.GlyphCount,) = (len(x) for x in d)
                def ChainSetRuleData(r, d):
                    (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d)
            else:
                assert 0, "unknown format: %s" % Format

            # Expose the chained or plain accessors under uniform names.
            if Chain:
                self.Coverage = ChainCoverage
                self.ContextData = ChainContextData
                self.SetContextData = SetChainContextData
                self.RuleData = ChainRuleData
                self.SetRuleData = ChainSetRuleData
            else:
                self.Coverage = Coverage
                self.ContextData = ContextData
                self.SetContextData = SetContextData
                self.RuleData = RuleData
                self.SetRuleData = SetRuleData

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleCount = ChainTyp+'RuleCount'
                self.RuleSet = ChainTyp+'RuleSet'
                self.RuleSetCount = ChainTyp+'RuleSetCount'
                self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleCount = ChainTyp+'ClassRuleCount'
                self.RuleSet = ChainTyp+'ClassSet'
                self.RuleSetCount = ChainTyp+'ClassSetCount'
                # Class 0 implicitly matches every glyph not assigned a class.
                self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c
                                                       else (set(glyphs) if r == 0 else set()))

                self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
                self.ClassDefIndex = 1 if Chain else 0
                self.Input = 'Input' if Chain else 'Class'

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    # Cache one helper per (class, format) on the otTables class itself.
    if not hasattr(self.__class__, "__ContextHelpers"):
        self.__class__.__ContextHelpers = {}
    if self.Format not in self.__class__.__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__.__ContextHelpers[self.Format] = helper
    return self.__class__.__ContextHelpers[self.Format]
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs):
    # Recurse into the lookups referenced by this contextual subtable to
    # collect glyphs reachable from cur_glyphs.  For each applicable rule,
    # the 'chaos' set tracks input positions whose glyph set can no longer
    # be narrowed (because an earlier lookup at or before that position may
    # have rewritten the sequence); for those, the child lookup is closed
    # over all glyphs (pos_glyphs=None).
    c = self.__subset_classify_context()

    indices = c.Coverage(self).intersect(cur_glyphs)
    if not indices:
        return []
    cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs)

    if self.Format == 1:
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                # Skip rules whose context glyphs are not all in the subset.
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                           for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            # Position 0 is the coverage glyph for this ruleset.
                            pos_glyphs = frozenset([c.Coverage(self).glyphs[i]])
                        else:
                            pos_glyphs = frozenset([r.Input[seqi - 1]])
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        # A non-1:1 lookup can reshuffle everything from
                        # this position onward.
                        chaos.update(range(seqi, len(r.Input)+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 2:
        ClassDef = getattr(self, c.ClassDef)
        indices = ClassDef.intersect(cur_glyphs)
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                # Skip rules whose context classes have no subset glyphs.
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                           for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i))
                        else:
                            pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1]))
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        chaos.update(range(seqi, len(getattr(r, c.Input))+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 3:
        # Format 3 carries exactly one rule, expressed as coverages.
        if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
            return []
        r = self
        chaos = set()
        for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if seqi in chaos:
                # TODO Can we improve this?
                pos_glyphs = None
            else:
                if seqi == 0:
                    pos_glyphs = frozenset(cur_glyphs)
                else:
                    pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs))
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos.add(seqi)
            if lookup.may_have_non_1to1():
                chaos.update(range(seqi, len(r.InputCoverage)+1))
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    else:
        assert 0, "unknown format: %s" % self.Format
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ContextSubst,
             otTables.ContextPos,
             otTables.ChainContextSubst,
             otTables.ChainContextPos)
def subset_glyphs(self, s):
    # Prune this contextual subtable to the subset glyphs: drop rules whose
    # context can no longer match, remap class values (format 2), and prune
    # coverages.  Returns True if anything useful remains.
    c = self.__subset_classify_context()

    if self.Format == 1:
        indices = self.Coverage.subset(s.glyphs)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep only rules whose every context glyph survives.
            ss = [r for r in ss
                  if r and all(all(g in s.glyphs for g in glist)
                               for glist in c.RuleData(r))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))
        # Prune empty rulesets
        indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)]
        self.Coverage.remap(indices)
        rss = [rss[i] for i in indices]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))
        return bool(rss)
    elif self.Format == 2:
        if not self.Coverage.subset(s.glyphs):
            return False
        ContextData = c.ContextData(self)
        # Subset each ClassDef; klass_maps[i] lists the surviving old class
        # values (position in the list = new class value).
        klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData]

        # Keep rulesets for class numbers that survived.
        indices = klass_maps[c.ClassDefIndex]
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        del rssCount
        # Delete, but not renumber, unreachable rulesets.
        indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs)
        rss = [rss if i in indices else None for i,rss in enumerate(rss)]

        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep only rules whose every context class survives.
            ss = [r for r in ss
                  if r and all(all(k in klass_map for k in klist)
                               for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))

            # Remap rule classes
            for r in ss:
                c.SetRuleData(r, [[klass_map.index(k) for k in klist]
                                  for klass_map,klist in zip(klass_maps, c.RuleData(r))])

        # Prune empty rulesets
        rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss]
        # Trailing Nones can be dropped entirely; interior ones must stay to
        # preserve class-number indexing.
        while rss and rss[-1] is None:
            del rss[-1]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))

        # TODO: We can do a second round of remapping class values based
        # on classes that are actually used in at least one rule. Right
        # now we subset classes to c.glyphs only. Or better, rewrite
        # the above to do that.

        return bool(rss)
    elif self.Format == 3:
        return all(x.subset(s.glyphs) for x in c.RuleData(self))
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def subset_lookups(self, lookup_indices):
    # Drop lookup records that reference removed lookups, then renumber the
    # survivors into the new (compacted) lookup list.
    c = self.__subset_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                # Filter first, then remap, so index() only sees survivors.
                setattr(r, c.LookupRecord,
                        [ll for ll in getattr(r, c.LookupRecord)
                         if ll and ll.LookupListIndex in lookup_indices])
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    elif self.Format == 3:
        setattr(self, c.LookupRecord,
                [ll for ll in getattr(self, c.LookupRecord)
                 if ll and ll.LookupListIndex in lookup_indices])
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def collect_lookups(self):
    """Gather the lookup-list indices referenced by this contextual subtable."""
    c = self.__subset_classify_context()

    if self.Format in [1, 2]:
        found = []
        for ruleset in getattr(self, c.RuleSet):
            if not ruleset:
                continue
            for rule in getattr(ruleset, c.Rule):
                if not rule:
                    continue
                for record in getattr(rule, c.LookupRecord):
                    if record:
                        found.append(record.LookupListIndex)
        return found
    elif self.Format == 3:
        return [record.LookupListIndex
                for record in getattr(self, c.LookupRecord) if record]
    else:
        assert 0, "unknown format: %s" % self.Format
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Delegate glyph closure to the wrapped subtable."""
    if self.Format == 1:
        self.ExtSubTable.closure_glyphs(s, cur_glyphs)
        return
    assert 0, "unknown format: %s" % self.Format
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst)
def may_have_non_1to1(self):
    """Delegate the 1:1 check to the wrapped subtable."""
    if self.Format == 1:
        return self.ExtSubTable.may_have_non_1to1()
    assert 0, "unknown format: %s" % self.Format
|
2013-08-12 20:24:33 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_glyphs(self, s):
    """Delegate glyph subsetting to the wrapped subtable."""
    if self.Format == 1:
        return self.ExtSubTable.subset_glyphs(s)
    assert 0, "unknown format: %s" % self.Format
|
2013-07-21 18:16:55 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def prune_post_subset(self, options):
    """Delegate post-subset pruning to the wrapped subtable."""
    if self.Format == 1:
        return self.ExtSubTable.prune_post_subset(options)
    assert 0, "unknown format: %s" % self.Format
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
    """Delegate lookup-index remapping to the wrapped subtable."""
    if self.Format == 1:
        return self.ExtSubTable.subset_lookups(lookup_indices)
    assert 0, "unknown format: %s" % self.Format
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def collect_lookups(self):
    """Delegate lookup collection to the wrapped subtable."""
    if self.Format == 1:
        return self.ExtSubTable.collect_lookups()
    assert 0, "unknown format: %s" % self.Format
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
    # Close this lookup over cur_glyphs (default: the whole current subset),
    # recursing into subtables, which may in turn recurse into other lookups.
    if cur_glyphs is None:
        cur_glyphs = frozenset(s.glyphs)

    # Memoize
    if (id(self), cur_glyphs) in s._doneLookups:
        return
    s._doneLookups.add((id(self), cur_glyphs))

    # Guard against fonts whose contextual lookups recurse into themselves.
    if self in s._activeLookups:
        raise Exception("Circular loop in lookup recursion")
    s._activeLookups.append(self)
    for st in self.SubTable:
        if not st: continue
        st.closure_glyphs(s, cur_glyphs)
    assert(s._activeLookups[-1] == self)
    del s._activeLookups[-1]
|
2013-07-23 14:52:18 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def subset_glyphs(self, s):
    """Subset every subtable; keep the lookup if any subtable survives."""
    surviving = []
    for subtable in self.SubTable:
        if subtable and subtable.subset_glyphs(s):
            surviving.append(subtable)
    self.SubTable = surviving
    self.SubTableCount = len(surviving)
    return bool(self.SubTableCount)
|
2013-07-21 23:15:32 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def prune_post_subset(self, options):
    """Prune every subtable; report whether any of them changed."""
    changed = False
    for subtable in self.SubTable:
        if not subtable:
            continue
        # Prune unconditionally; do not short-circuit on the first hit.
        if subtable.prune_post_subset(options):
            changed = True
    return changed
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def subset_lookups(self, lookup_indices):
    """Remap lookup references inside every subtable."""
    for subtable in self.SubTable:
        subtable.subset_lookups(lookup_indices)
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def collect_lookups(self):
    """Collect lookup indices referenced by any subtable."""
    collected = []
    for subtable in self.SubTable:
        if subtable:
            collected.extend(subtable.collect_lookups())
    return collected
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Lookup)
def may_have_non_1to1(self):
    """True if any subtable can substitute other than one-to-one."""
    for subtable in self.SubTable:
        if subtable and subtable.may_have_non_1to1():
            return True
    return False
|
2013-08-12 20:24:33 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LookupList)
def subset_glyphs(self, s):
    """Returns the indices of nonempty lookups."""
    kept = []
    for index, lookup in enumerate(self.Lookup):
        if lookup and lookup.subset_glyphs(s):
            kept.append(index)
    return kept
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LookupList)
def prune_post_subset(self, options):
    """Prune every lookup; report whether any of them changed."""
    changed = False
    for lookup in self.Lookup:
        if not lookup:
            continue
        if lookup.prune_post_subset(options):
            changed = True
    return changed
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LookupList)
def subset_lookups(self, lookup_indices):
    """Keep only the given lookups, then remap references inside them."""
    self.ensureDecompiled()
    old_lookups = self.Lookup
    count = self.LookupCount
    self.Lookup = [old_lookups[i] for i in lookup_indices if i < count]
    self.LookupCount = len(self.Lookup)
    for lookup in self.Lookup:
        lookup.subset_lookups(lookup_indices)
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2015-08-24 12:40:51 +01:00
|
|
|
@_add_method(otTables.LookupList)
def neuter_lookups(self, lookup_indices):
    """Sets lookups not in lookup_indices to None, keeping indices stable."""
    self.ensureDecompiled()
    neutered = []
    for index, lookup in enumerate(self.Lookup):
        neutered.append(lookup if index in lookup_indices else None)
    self.Lookup = neutered
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.LookupList)
def closure_lookups(self, lookup_indices):
    """Returns sorted index of all lookups reachable from lookup_indices."""
    # Fixpoint iteration: repeatedly collect lookups referenced by the
    # newly-added set until no new lookup appears.
    lookup_indices = _uniq_sort(lookup_indices)
    recurse = lookup_indices
    while True:
        recurse_lookups = sum((self.Lookup[i].collect_lookups()
                               for i in recurse if i < self.LookupCount), [])
        # Keep only valid indices not already in the closure.
        recurse_lookups = [l for l in recurse_lookups
                           if l not in lookup_indices and l < self.LookupCount]
        if not recurse_lookups:
            return _uniq_sort(lookup_indices)
        recurse_lookups = _uniq_sort(recurse_lookups)
        lookup_indices.extend(recurse_lookups)
        recurse = recurse_lookups
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Feature)
def subset_lookups(self, lookup_indices):
    """Return truthy if the feature is non-empty afterwards."""
    # Keep only surviving lookups, renumbered into the compacted lookup list.
    self.LookupListIndex = [lookup_indices.index(l)
                            for l in self.LookupListIndex
                            if l in lookup_indices]
    self.LookupCount = len(self.LookupListIndex)
    # A feature with FeatureParams (e.g. stylistic-set names) is kept even
    # when it references no lookups.
    return self.LookupCount or self.FeatureParams
|
2013-07-21 18:40:59 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.FeatureList)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    # Note: Never ever drop feature 'pref', even if it's empty.
    # HarfBuzz chooses the shaper for Khmer based on the presence of this
    # feature.  See thread at:
    # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html
    kept = []
    for index, record in enumerate(self.FeatureRecord):
        # subset_lookups must run for every record (it mutates the feature),
        # so evaluate it before the 'pref' escape hatch.
        non_empty = record.Feature.subset_lookups(lookup_indices)
        if non_empty or record.FeatureTag == 'pref':
            kept.append(index)
    return kept
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.FeatureList)
def collect_lookups(self, feature_indices):
    """Collect lookup indices referenced by the given features."""
    found = []
    for index in feature_indices:
        if index < self.FeatureCount:
            found.extend(self.FeatureRecord[index].Feature.LookupListIndex)
    return found
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.FeatureList)
def subset_features(self, feature_indices):
    """Keep only the given feature records; returns True if any remain."""
    self.ensureDecompiled()
    records = self.FeatureRecord
    self.FeatureRecord = [records[i] for i in feature_indices]
    self.FeatureCount = len(self.FeatureRecord)
    return bool(self.FeatureCount)
|
2013-07-23 12:10:46 -04:00
|
|
|
|
2017-08-08 17:20:37 -07:00
|
|
|
@_add_method(otTables.FeatureTableSubstitution)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    kept = []
    for record in self.SubstitutionRecord:
        # subset_lookups mutates the alternate feature and reports emptiness.
        if record.Feature.subset_lookups(lookup_indices):
            kept.append(record.FeatureIndex)
    return kept
|
|
|
|
|
|
|
|
@_add_method(otTables.FeatureVariations)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    indices = []
    for record in self.FeatureVariationRecord:
        indices.extend(
            record.FeatureTableSubstitution.subset_lookups(lookup_indices))
    return indices
|
|
|
|
|
|
|
|
@_add_method(otTables.FeatureVariations)
def collect_lookups(self, feature_indices):
    """Return lookup indices referenced by substituted versions of the
    given features."""
    collected = []
    for variation in self.FeatureVariationRecord:
        for sub in variation.FeatureTableSubstitution.SubstitutionRecord:
            if sub.FeatureIndex in feature_indices:
                collected.extend(sub.Feature.LookupListIndex)
    return collected
|
|
|
|
|
|
|
|
@_add_method(otTables.FeatureTableSubstitution)
def subset_features(self, feature_indices):
    """Keep only substitution records targeting the given feature indices."""
    self.ensureDecompiled()
    kept = [record for record in self.SubstitutionRecord
            if record.FeatureIndex in feature_indices]
    self.SubstitutionRecord = kept
    self.SubstitutionCount = len(kept)
    return bool(kept)
|
|
|
|
|
|
|
|
@_add_method(otTables.FeatureVariations)
def subset_features(self, feature_indices):
    """Keep only FeatureVariationRecords whose substitution table still
    references surviving features.

    Bug fix: the filtered list was previously assigned to a misspelled
    attribute ('FeaturVariationRecord'), so the real FeatureVariationRecord
    list was never pruned and FeatureVariationCount never changed.
    """
    self.ensureDecompiled()
    # subset_features mutates each record's substitution table in place and
    # returns whether anything survived.
    self.FeatureVariationRecord = [
        r for r in self.FeatureVariationRecord
        if r.FeatureTableSubstitution.subset_features(feature_indices)]
    self.FeatureVariationCount = len(self.FeatureVariationRecord)
    return bool(self.FeatureVariationCount)
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def subset_features(self, feature_indices):
    """Remap this langsys' feature indices into the subsetted feature list.

    The required feature survives only if listed; 0xFFFF means "none".
    """
    try:
        self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
    except ValueError:
        self.ReqFeatureIndex = 65535
    # Keep surviving features and remap them to their new positions.
    self.FeatureIndex = [feature_indices.index(f)
                         for f in self.FeatureIndex
                         if f in feature_indices]
    self.FeatureCount = len(self.FeatureIndex)
    return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def collect_features(self):
    """Return a sorted, de-duplicated list of referenced feature indices."""
    indices = list(self.FeatureIndex)
    if self.ReqFeatureIndex != 65535:
        indices.append(self.ReqFeatureIndex)
    return _uniq_sort(indices)
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Script)
def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False):
    """Subset features in every langsys; drop langsys that become empty."""
    if self.DefaultLangSys:
        default_nonempty = self.DefaultLangSys.subset_features(feature_indices)
        if not default_nonempty and not keepEmptyDefaultLangSys:
            self.DefaultLangSys = None
    surviving = []
    for record in self.LangSysRecord:
        if record.LangSys.subset_features(feature_indices):
            surviving.append(record)
    self.LangSysRecord = surviving
    self.LangSysCount = len(surviving)
    return bool(self.LangSysCount or self.DefaultLangSys)
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.Script)
def collect_features(self):
    """Return sorted unique feature indices used by any langsys of this script."""
    collected = []
    for record in self.LangSysRecord:
        collected.extend(record.LangSys.collect_features())
    if self.DefaultLangSys:
        collected.extend(self.DefaultLangSys.collect_features())
    return _uniq_sort(collected)
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ScriptList)
def subset_features(self, feature_indices, retain_empty):
    """Subset features per script; optionally keep scripts that end up empty."""
    # The DFLT script keeps its default langsys even when empty:
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32
    kept = []
    for record in self.ScriptRecord:
        # subset_features mutates the script; call it for every record.
        nonempty = record.Script.subset_features(
            feature_indices, record.ScriptTag == 'DFLT')
        if nonempty or retain_empty:
            kept.append(record)
    self.ScriptRecord = kept
    self.ScriptCount = len(kept)
    return bool(kept)
|
2013-07-22 11:46:50 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(otTables.ScriptList)
def collect_features(self):
    """Return sorted unique feature indices referenced by any script."""
    features = []
    for record in self.ScriptRecord:
        features.extend(record.Script.collect_features())
    return _uniq_sort(features)
|
2013-07-23 10:23:42 -04:00
|
|
|
|
2015-10-23 13:08:04 -07:00
|
|
|
# CBLC will inherit it
@_add_method(ttLib.getTableClass('EBLC'))
def subset_glyphs(self, s):
    """Drop bitmap index entries for glyphs outside the subset."""
    keep = s.glyphs
    for strike in self.strikes:
        for sub in strike.indexSubTables:
            sub.names = [name for name in sub.names if name in keep]
        # Discard index subtables that no longer reference any glyph.
        strike.indexSubTables = [sub for sub in strike.indexSubTables
                                 if sub.names]
    # Discard strikes that became empty.
    self.strikes = [strike for strike in self.strikes if strike.indexSubTables]

    return True
|
|
|
|
|
|
|
|
# CBDT will inherit it
@_add_method(ttLib.getTableClass('EBDT'))
def subset_glyphs(self, s):
    """Keep only the bitmap data of subsetted glyphs, strike by strike."""
    self.strikeData = [{g: strike[g] for g in s.glyphs if g in strike}
                       for strike in self.strikeData]
    return True
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'))
def closure_glyphs(self, s):
    """Extend s.glyphs with all glyphs reachable through GSUB substitutions.

    Runs the lookup closure repeatedly until a fixed point is reached,
    since substitution outputs can themselves trigger further substitutions.
    """
    s.table = self.table
    # Collect the lookups reachable from the script list (plus any
    # FeatureVariations), then close over them.
    if self.table.ScriptList:
        feature_indices = self.table.ScriptList.collect_features()
    else:
        feature_indices = []
    if self.table.FeatureList:
        lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
    else:
        lookup_indices = []
    if getattr(self.table, 'FeatureVariations', None):
        lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
    lookup_indices = _uniq_sort(lookup_indices)
    if self.table.LookupList:
        while True:
            orig_glyphs = frozenset(s.glyphs)
            # Scratch state consumed by the per-lookup closure pass.
            s._activeLookups = []
            s._doneLookups = set()
            for i in lookup_indices:
                if i >= self.table.LookupList.LookupCount: continue
                if not self.table.LookupList.Lookup[i]: continue
                self.table.LookupList.Lookup[i].closure_glyphs(s)
            del s._activeLookups, s._doneLookups
            # Fixed point: no new glyphs were added this round.
            if orig_glyphs == s.glyphs:
                break
    del s.table
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_glyphs(self, s):
    """Subset lookups against the GSUB-closed glyph set, then prune layout."""
    s.glyphs = s.glyphs_gsubed
    lookup_list = self.table.LookupList
    if lookup_list:
        lookup_indices = lookup_list.subset_glyphs(s)
    else:
        lookup_indices = []
    self.subset_lookups(lookup_indices)
    return True
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2016-02-10 16:22:24 +07:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def retain_empty_scripts(self):
    """Whether script records emptied by subsetting should still be kept.

    True for GSUB only; see:
    https://github.com/behdad/fonttools/issues/518
    https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15
    """
    gsub_class = ttLib.getTableClass('GSUB')
    return self.__class__ == gsub_class
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_lookups(self, lookup_indices):
    """Retains specified lookups, then removes empty features, language
    systems, and scripts."""
    if self.table.LookupList:
        self.table.LookupList.subset_lookups(lookup_indices)
    # Features that still reference at least one retained lookup.
    if self.table.FeatureList:
        feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
    else:
        feature_indices = []
    if getattr(self.table, 'FeatureVariations', None):
        feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices)
    feature_indices = _uniq_sort(feature_indices)
    # Drop features that became empty, then prune langsys/scripts to match.
    if self.table.FeatureList:
        self.table.FeatureList.subset_features(feature_indices)
    if getattr(self.table, 'FeatureVariations', None):
        self.table.FeatureVariations.subset_features(feature_indices)
    if self.table.ScriptList:
        self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def neuter_lookups(self, lookup_indices):
    """Sets lookups not in lookup_indices to None."""
    lookup_list = self.table.LookupList
    if lookup_list:
        lookup_list.neuter_lookups(lookup_indices)
|
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_lookups(self, remap=True):
    """Remove (default) or neuter unreferenced lookups"""
    # Features reachable from the script list...
    if self.table.ScriptList:
        feature_indices = self.table.ScriptList.collect_features()
    else:
        feature_indices = []
    # ...and the lookups those features (and FeatureVariations) reference.
    if self.table.FeatureList:
        lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
    else:
        lookup_indices = []
    if getattr(self.table, 'FeatureVariations', None):
        lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
    lookup_indices = _uniq_sort(lookup_indices)
    # Include lookups referenced indirectly by other lookups.
    if self.table.LookupList:
        lookup_indices = self.table.LookupList.closure_lookups(lookup_indices)
    else:
        lookup_indices = []
    if remap:
        self.subset_lookups(lookup_indices)
    else:
        self.neuter_lookups(lookup_indices)
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_feature_tags(self, feature_tags):
    """Keep only features whose tag is in feature_tags; prune scripts to match."""
    if self.table.FeatureList:
        feature_indices = \
            [i for i,f in enumerate(self.table.FeatureList.FeatureRecord)
             if f.FeatureTag in feature_tags]
        self.table.FeatureList.subset_features(feature_indices)
        if getattr(self.table, 'FeatureVariations', None):
            self.table.FeatureVariations.subset_features(feature_indices)
    else:
        feature_indices = []
    if self.table.ScriptList:
        self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-12-06 21:58:41 -05:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_features(self):
    """Remove unreferenced features"""
    # Only features reachable from some script survive.
    if self.table.ScriptList:
        feature_indices = self.table.ScriptList.collect_features()
    else:
        feature_indices = []
    if self.table.FeatureList:
        self.table.FeatureList.subset_features(feature_indices)
    if getattr(self.table, 'FeatureVariations', None):
        self.table.FeatureVariations.subset_features(feature_indices)
    # Remap the scripts' feature indices to the pruned feature list.
    if self.table.ScriptList:
        self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts())
|
2013-12-06 21:58:41 -05:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_pre_subset(self, font, options):
    """Drop undesired layout features and neuter unreferenced lookups."""
    # '*' means keep every feature.
    if '*' not in options.layout_features:
        self.subset_feature_tags(options.layout_features)
    # Neuter (do not remove) unreferenced lookups at this stage.
    self.prune_lookups(remap=False)
    return True
|
2013-08-15 19:24:36 -04:00
|
|
|
|
2013-12-07 12:54:44 -05:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def remove_redundant_langsys(self):
    """Delete language systems that are identical to their script's
    default langsys."""
    table = self.table
    if not table.ScriptList or not table.FeatureList:
        return

    features = table.FeatureList.FeatureRecord

    for s in table.ScriptList.ScriptRecord:
        d = s.Script.DefaultLangSys
        if not d:
            continue
        # Iterate over a copy, as we may remove records while looping.
        for lr in s.Script.LangSysRecord[:]:
            l = lr.LangSys
            # Compare d and l
            if len(d.FeatureIndex) != len(l.FeatureIndex):
                continue
            if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535):
                continue

            if d.ReqFeatureIndex != 65535:
                if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]:
                    continue

            # Compare the referenced feature records pairwise; the for/else
            # runs only when no mismatch was found.
            for i in range(len(d.FeatureIndex)):
                if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]:
                    break
            else:
                # LangSys and default are equal; delete LangSys
                # NOTE(review): LangSysCount is not decremented here --
                # presumably recomputed at compile time; confirm.
                s.Script.LangSysRecord.remove(lr)
|
2013-12-07 12:54:44 -05:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_post_subset(self, options):
    """Final layout cleanup after glyph subsetting: prune lookups and
    features, drop redundant langsys, and empty FeatureVariations."""
    table = self.table

    self.prune_lookups() # XXX Is this actually needed?!

    if table.LookupList:
        table.LookupList.prune_post_subset(options)
        # XXX Next two lines disabled because OTS is stupid and
        # doesn't like NULL offsets here.
        #if not table.LookupList.Lookup:
        #    table.LookupList = None

    if not table.LookupList:
        table.FeatureList = None

    if table.FeatureList:
        self.remove_redundant_langsys()
        # Remove unreferenced features
        self.prune_features()

    # XXX Next two lines disabled because OTS is stupid and
    # doesn't like NULL offsets here.
    #if table.FeatureList and not table.FeatureList.FeatureRecord:
    #    table.FeatureList = None

    # Never drop scripts themselves as them just being available
    # holds semantic significance.
    # XXX Next two lines disabled because OTS is stupid and
    # doesn't like NULL offsets here.
    #if table.ScriptList and not table.ScriptList.ScriptRecord:
    #    table.ScriptList = None

    if not table.FeatureList and hasattr(table, 'FeatureVariations'):
        table.FeatureVariations = None

    # Downgrade table version 1.1 -> 1.0 once FeatureVariations is gone.
    if hasattr(table, 'FeatureVariations') and not table.FeatureVariations:
        if table.Version == 0x00010001:
            table.Version = 0x00010000

    return True
|
2013-07-23 12:10:46 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('GDEF'))
def subset_glyphs(self, s):
    """Subset every GDEF subtable against the GSUB-closed glyph set."""
    glyphs = s.glyphs_gsubed
    table = self.table
    if table.LigCaretList:
        indices = table.LigCaretList.Coverage.subset(glyphs)
        table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] for i in indices]
        table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
    if table.MarkAttachClassDef:
        table.MarkAttachClassDef.classDefs = \
            {g:v for g,v in table.MarkAttachClassDef.classDefs.items()
             if g in glyphs}
    if table.GlyphClassDef:
        table.GlyphClassDef.classDefs = \
            {g:v for g,v in table.GlyphClassDef.classDefs.items()
             if g in glyphs}
    if table.AttachList:
        indices = table.AttachList.Coverage.subset(glyphs)
        GlyphCount = table.AttachList.GlyphCount
        table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
                                        for i in indices if i < GlyphCount]
        table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
    if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef:
        for coverage in table.MarkGlyphSetsDef.Coverage:
            coverage.subset(glyphs)
        # TODO: The following is disabled. If enabling, we need to go fixup all
        # lookups that use MarkFilteringSet and map their set.
        # indices = table.MarkGlyphSetsDef.Coverage = \
        #   [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs]
    return True
|
2013-11-25 04:19:42 -05:00
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('GDEF'))
def prune_post_subset(self, options):
    """Drop GDEF subtables that became empty; return False when nothing
    useful remains so the whole table can be dropped."""
    table = self.table
    # XXX check these against OTS
    if table.LigCaretList and not table.LigCaretList.LigGlyphCount:
        table.LigCaretList = None
    if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs:
        table.MarkAttachClassDef = None
    if table.GlyphClassDef and not table.GlyphClassDef.classDefs:
        table.GlyphClassDef = None
    if table.AttachList and not table.AttachList.GlyphCount:
        table.AttachList = None
    if (hasattr(table, "MarkGlyphSetsDef") and
        table.MarkGlyphSetsDef and
        not table.MarkGlyphSetsDef.Coverage):
        table.MarkGlyphSetsDef = None
        # Version 1.2 exists to carry MarkGlyphSetsDef; downgrade when gone.
        if table.Version == 0x00010002:
            table.Version = 0x00010000
    return bool(table.LigCaretList or
                table.MarkAttachClassDef or
                table.GlyphClassDef or
                table.AttachList or
                (table.Version >= 0x00010002 and table.MarkGlyphSetsDef))
|
2013-07-22 12:48:17 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('kern'))
def prune_pre_subset(self, font, options):
    """Drop kern subtables of unknown type."""
    # Only subtables with a parsed kernTable mapping are usable.
    known = []
    for subtable in self.kernTables:
        if hasattr(subtable, 'kernTable'):
            known.append(subtable)
    self.kernTables = known
    return bool(known)
|
2013-07-24 18:51:05 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('kern'))
def subset_glyphs(self, s):
    """Keep kerning pairs where both glyphs survive; drop empty subtables."""
    keep = s.glyphs_gsubed
    for subtable in self.kernTables:
        pruned = {}
        for (left, right), value in subtable.kernTable.items():
            if left in keep and right in keep:
                pruned[(left, right)] = value
        subtable.kernTable = pruned
    self.kernTables = [t for t in self.kernTables if t.kernTable]
    return bool(self.kernTables)
|
2013-08-13 19:50:38 -04:00
|
|
|
|
2013-10-28 13:09:25 +01:00
|
|
|
@_add_method(ttLib.getTableClass('vmtx'))
def subset_glyphs(self, s):
    """Keep vertical metrics only for subsetted glyphs."""
    remaining = _dict_subset(self.metrics, s.glyphs)
    self.metrics = remaining
    return len(remaining) > 0
|
2013-07-22 14:29:08 -04:00
|
|
|
|
2013-10-28 13:09:25 +01:00
|
|
|
@_add_method(ttLib.getTableClass('hmtx'))
def subset_glyphs(self, s):
    """Keep horizontal metrics only for subsetted glyphs."""
    kept = _dict_subset(self.metrics, s.glyphs)
    self.metrics = kept
    # hmtx is a required table; never drop it.
    return True
|
2013-10-28 13:09:25 +01:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('hdmx'))
def subset_glyphs(self, s):
    """Keep per-size device metrics only for subsetted glyphs."""
    subsetted = {}
    for size, widths in self.hdmx.items():
        subsetted[size] = _dict_subset(widths, s.glyphs)
    self.hdmx = subsetted
    return bool(subsetted)
|
2013-07-22 14:49:54 -04:00
|
|
|
|
2016-03-31 16:23:24 +02:00
|
|
|
@_add_method(ttLib.getTableClass('gvar'))
def prune_pre_subset(self, font, options):
    """Empty the .notdef variations when its outline is being dropped."""
    if options.notdef_glyph and not options.notdef_outline:
        notdef_name = font.glyphOrder[0]
        self.variations[notdef_name] = []
    return True
|
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('gvar'))
def subset_glyphs(self, s):
    """Keep glyph variation data only for subsetted glyphs."""
    remaining = _dict_subset(self.variations, s.glyphs)
    self.variations = remaining
    self.glyphCount = len(remaining)
    return bool(remaining)
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('VORG'))
def subset_glyphs(self, s):
    """Keep explicit vertical-origin records only for subsetted glyphs."""
    kept = {}
    for glyph, origin in self.VOriginRecords.items():
        if glyph in s.glyphs:
            kept[glyph] = origin
    self.VOriginRecords = kept
    self.numVertOriginYMetrics = len(kept)
    # Never drop VORG: glyphs without a record use the default metric.
    return True
|
2013-07-22 15:29:17 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('post'))
def prune_pre_subset(self, font, options):
    """Switch to format 3.0 (no glyph names) unless names were requested."""
    if not options.glyph_names:
        self.formatType = 3.0
    # post is a required table; never drop it.
    return True
|
2013-07-23 12:56:06 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('post'))
def subset_glyphs(self, s):
    """Clear the extra-names list; that is all 'post' needs for subsetting."""
    self.extraNames = []
    # post is a required table; never drop it.
    return True
|
2013-07-22 15:17:12 -04:00
|
|
|
|
2015-11-07 14:09:00 +04:00
|
|
|
@_add_method(ttLib.getTableClass('COLR'))
def closure_glyphs(self, s):
    """Add every layer glyph referenced (transitively) by glyphs in the subset."""
    pending = s.glyphs
    while True:
        new_layers = set()
        for glyph in pending:
            if glyph not in self.ColorLayers:
                continue
            for layer in self.ColorLayers[glyph]:
                if layer.name not in s.glyphs:
                    new_layers.add(layer.name)
        if not new_layers:
            break
        s.glyphs.update(new_layers)
        # Only the newly added glyphs need examining next round.
        pending = new_layers
|
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('COLR'))
def subset_glyphs(self, s):
    """Keep color layer lists only for subsetted base glyphs."""
    kept = {}
    for glyph in s.glyphs:
        if glyph in self.ColorLayers:
            kept[glyph] = self.ColorLayers[glyph]
    self.ColorLayers = kept
    return bool(kept)
|
|
|
|
|
|
|
|
# TODO: prune unused palettes
@_add_method(ttLib.getTableClass('CPAL'))
def prune_post_subset(self, options):
    """Keep CPAL as-is; palette pruning is not implemented yet."""
    return True
|
|
|
|
|
2016-01-23 06:58:08 +04:00
|
|
|
@_add_method(otTables.MathGlyphConstruction)
def closure_glyphs(self, glyphs):
    """Return glyphs reachable from this construction: size variants plus
    assembly part glyphs."""
    reachable = {record.VariantGlyph for record in self.MathGlyphVariantRecord}
    if self.GlyphAssembly:
        reachable.update(part.glyph for part in self.GlyphAssembly.PartRecords)
    return reachable
|
|
|
|
|
|
|
|
@_add_method(otTables.MathVariants)
def closure_glyphs(self, s):
    """Add math size-variant and assembly glyphs of subsetted glyphs."""
    glyphs = frozenset(s.glyphs)
    variants = set()

    # Vertical first, then horizontal, mirroring the table layout.
    for coverage, constructions in (
            (self.VertGlyphCoverage, self.VertGlyphConstruction),
            (self.HorizGlyphCoverage, self.HorizGlyphConstruction)):
        if not coverage:
            continue
        for i in coverage.intersect(glyphs):
            variants.update(constructions[i].closure_glyphs(glyphs))

    s.glyphs.update(variants)
|
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('MATH'))
def closure_glyphs(self, s):
    """Close the glyph set over MATH size variants and assemblies."""
    variants = self.table.MathVariants
    variants.closure_glyphs(s)
|
|
|
|
|
|
|
|
@_add_method(otTables.MathItalicsCorrectionInfo)
def subset_glyphs(self, s):
    """Keep italics-correction entries only for covered, subsetted glyphs."""
    retained = self.Coverage.subset(s.glyphs)
    corrections = self.ItalicsCorrection
    self.ItalicsCorrection = [corrections[i] for i in retained]
    self.ItalicsCorrectionCount = len(self.ItalicsCorrection)
    return self.ItalicsCorrectionCount > 0
|
|
|
|
|
|
|
|
@_add_method(otTables.MathTopAccentAttachment)
def subset_glyphs(self, s):
    """Keep top-accent attachment entries only for subsetted glyphs."""
    retained = self.TopAccentCoverage.subset(s.glyphs)
    attachments = self.TopAccentAttachment
    self.TopAccentAttachment = [attachments[i] for i in retained]
    self.TopAccentAttachmentCount = len(self.TopAccentAttachment)
    return self.TopAccentAttachmentCount > 0
|
|
|
|
|
|
|
|
@_add_method(otTables.MathKernInfo)
def subset_glyphs(self, s):
    """Keep math-kern records only for subsetted glyphs."""
    retained = self.MathKernCoverage.subset(s.glyphs)
    records = self.MathKernInfoRecords
    self.MathKernInfoRecords = [records[i] for i in retained]
    self.MathKernCount = len(self.MathKernInfoRecords)
    return self.MathKernCount > 0
|
|
|
|
|
|
|
|
@_add_method(otTables.MathGlyphInfo)
def subset_glyphs(self, s):
    """Subset each optional per-glyph MATH subtable that is present."""
    for subtable in (self.MathItalicsCorrectionInfo,
                     self.MathTopAccentAttachment,
                     self.MathKernInfo):
        if subtable:
            subtable.subset_glyphs(s)
    if self.ExtendedShapeCoverage:
        self.ExtendedShapeCoverage.subset(s.glyphs)
    return True
|
|
|
|
|
|
|
|
@_add_method(otTables.MathVariants)
def subset_glyphs(self, s):
    """Keep vertical/horizontal glyph constructions for subsetted glyphs only."""
    if self.VertGlyphCoverage:
        kept = self.VertGlyphCoverage.subset(s.glyphs)
        constructions = self.VertGlyphConstruction
        self.VertGlyphConstruction = [constructions[i] for i in kept]
        self.VertGlyphCount = len(self.VertGlyphConstruction)

    if self.HorizGlyphCoverage:
        kept = self.HorizGlyphCoverage.subset(s.glyphs)
        constructions = self.HorizGlyphConstruction
        self.HorizGlyphConstruction = [constructions[i] for i in kept]
        self.HorizGlyphCount = len(self.HorizGlyphConstruction)

    return True
|
|
|
|
|
|
|
|
@_add_method(ttLib.getTableClass('MATH'))
def subset_glyphs(self, s):
    """Subset the MATH table against the math-closed glyph set."""
    s.glyphs = s.glyphs_mathed
    table = self.table
    table.MathGlyphInfo.subset_glyphs(s)
    table.MathVariants.subset_glyphs(s)
    return True
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableModule('glyf').Glyph)
def remapComponentsFast(self, indices):
    """Remap component glyph IDs in raw composite-glyph data.

    Operates directly on the binary glyph data without fully parsing it.
    ``indices`` lists the old glyph IDs being kept, in order, so a
    component's new ID is its position in that list.
    """
    # A negative numberOfContours (first int16) marks a composite glyph.
    if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
        return # Not composite
    data = array.array("B", self.data)
    i = 10  # skip the glyph header (numberOfContours + bounding box)
    more = 1
    while more:
        flags =(data[i] << 8) | data[i+1]
        glyphID =(data[i+2] << 8) | data[i+3]
        # Remap
        glyphID = indices.index(glyphID)
        data[i+2] = glyphID >> 8
        data[i+3] = glyphID & 0xFF
        i += 4
        flags = int(flags)

        # Skip the arguments/transform, whose size depends on the flags.
        if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS
        else: i += 2
        if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE
        elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
        elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
        more = flags & 0x0020 # MORE_COMPONENTS

    self.data = data.tostring()
|
2013-07-24 16:08:35 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('glyf'))
def closure_glyphs(self, s):
    """Add all component glyphs referenced (transitively) by the subset."""
    pending = s.glyphs
    while True:
        new_components = set()
        for glyph_name in pending:
            if glyph_name not in self.glyphs:
                continue
            glyph = self.glyphs[glyph_name]
            for component in glyph.getComponentNames(self):
                if component not in s.glyphs:
                    new_components.add(component)
        if not new_components:
            break
        s.glyphs.update(new_components)
        # Only newly added glyphs need examining next round.
        pending = new_components
|
2013-07-23 12:58:37 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('glyf'))
def prune_pre_subset(self, font, options):
    """Strip the .notdef outline when the options ask for it."""
    if options.notdef_glyph and not options.notdef_outline:
        notdef = self[self.glyphOrder[0]]
        # Reset the glyph to a completely empty one.
        notdef.__dict__.clear()
        notdef.data = ""
    return True
|
2013-08-29 18:02:48 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('glyf'))
def subset_glyphs(self, s):
    """Keep subsetted glyphs and remap component references to new glyph IDs."""
    self.glyphs = _dict_subset(self.glyphs, s.glyphs)
    # Old glyph IDs we keep, in original order; a component's new ID is its
    # position in this list.
    kept_ids = [i for i, name in enumerate(self.glyphOrder) if name in s.glyphs]
    for glyph in self.glyphs.values():
        if hasattr(glyph, "data"):
            glyph.remapComponentsFast(kept_ids)
        # Parsed (non-raw) glyphs need no remapping here.
    self.glyphOrder = [name for name in self.glyphOrder if name in s.glyphs]
    # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
    return True
|
2013-07-22 16:47:24 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('glyf'))
def prune_post_subset(self, options):
    """Trim glyph data, dropping hinting instructions unless kept by options."""
    strip_hints = not options.hinting
    for glyph in self.glyphs.values():
        glyph.trim(remove_hinting=strip_hints)
    return True
|
2013-07-23 12:37:41 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('CFF '))
def prune_pre_subset(self, font, options):
    """Prepare the CFF table: keep a single font, optionally blank the
    .notdef outline, and reset the Encoding."""
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]

    if options.notdef_glyph and not options.notdef_outline:
        for fontname in cff.keys():
            font = cff[fontname]
            c, fdSelectIndex = font.CharStrings.getItemAndSelector('.notdef')
            # CID-keyed fonts carry per-FD Private dicts in FDArray.
            if hasattr(font, 'FDArray') and font.FDArray is not None:
                private = font.FDArray[fdSelectIndex].Private
            else:
                private = font.Private
            dfltWdX = private.defaultWidthX
            nmnlWdX = private.nominalWidthX
            pen = NullPen()
            c.draw(pen) # this will set the charstring's width
            # Replace the outline with a bare 'endchar', keeping the advance
            # width (encoded relative to nominalWidthX when it differs from
            # defaultWidthX).
            if c.width != dfltWdX:
                c.program = [c.width - nmnlWdX, 'endchar']
            else:
                c.program = ['endchar']

    # Clear useless Encoding
    for fontname in cff.keys():
        font = cff[fontname]
        # https://github.com/behdad/fonttools/issues/620
        font.Encoding = "StandardEncoding"

    return True # bool(cff.fontNames)
|
2013-08-13 15:46:37 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('CFF '))
def subset_glyphs(self, s):
    """Subset charstrings, charset, and (for CID fonts) FDSelect to s.glyphs."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Load all glyphs
        # (force-decompiles the lazily-loaded charstrings we keep)
        for g in font.charset:
            if g not in s.glyphs: continue
            c, _ = cs.getItemAndSelector(g)

        if cs.charStringsAreIndexed:
            # Positions (old GIDs) of the glyphs we keep, in charset order.
            indices = [i for i,g in enumerate(font.charset) if g in s.glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            # Drop lazy-loading state; all items are in memory now.
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                sel = font.FDSelect
                # XXX We want to set sel.format to None, such that the
                # most compact format is selected. However, OTS was
                # broken and couldn't parse a FDSelect format 0 that
                # happened before CharStrings. As such, always force
                # format 3 until we fix cffLib to always generate
                # FDSelect after CharStrings.
                # https://github.com/khaledhosny/ots/pull/31
                #sel.format = None
                sel.format = 3
                sel.gidArray = [sel.gidArray[i] for i in indices]
            # Remap name -> old GID to name -> new GID.
            cs.charStrings = {g:indices.index(v)
                              for g,v in cs.charStrings.items()
                              if g in s.glyphs}
        else:
            cs.charStrings = {g:v
                              for g,v in cs.charStrings.items()
                              if g in s.glyphs}
        font.charset = [g for g in font.charset if g in s.glyphs]
        font.numGlyphs = len(font.charset)

    return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
|
2013-08-14 18:18:51 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
    """Rewrite callsubr/callgsubr operands to the renumbered subroutine
    indices, using the ``_used``/``_old_bias``/``_new_bias`` bookkeeping
    attached to the subroutine indexes during pruning."""
    program = self.program
    assert len(program)
    # Map each call operator to the subroutine index it draws from.
    dispatch = {'callsubr': subrs, 'callgsubr': gsubrs}
    for pos in range(1, len(program)):
        table = dispatch.get(program[pos])
        if table is None:
            continue
        operand = program[pos - 1]
        assert isinstance(operand, int)
        # Un-bias the old operand, find its position among the kept
        # subroutines, then re-bias for the compacted index.
        program[pos - 1] = (table._used.index(operand + table._old_bias)
                            - table._new_bias)
|
2013-08-14 19:37:39 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
    """Strip hinting from this charstring, using the ``_hints`` analysis
    previously attached by ``_DehintingT2Decompiler``.

    Removes calls to subroutines that became empty after dehinting, the
    leading hint-declaration prefix, and all hintmask/cntrmask operators,
    re-inserting the width operand when it must stay encoded.  Deletes the
    ``_hints`` attribute when done.
    """
    hints = self._hints

    # Each recorded deletion index points just past a [subr#, call*subr]
    # pair calling a hint-only subroutine; iterate in reverse so earlier
    # indices remain valid while deleting.
    if hints.deletions:
        p = self.program
        for idx in reversed(hints.deletions):
            del p[idx-2:idx]

    if hints.has_hint:
        # All deletions lie at or after last_hint, so removing them above
        # did not move the cut point used here.
        assert not hints.deletions or hints.last_hint <= hints.deletions[0]
        self.program = self.program[hints.last_hint:]
        if hasattr(self, 'width'):
            # Insert width back if needed
            if self.width != self.private.defaultWidthX:
                self.program.insert(0, self.width - self.private.nominalWidthX)

    if hints.has_hintmask:
        # Delete every hintmask/cntrmask operator together with the mask
        # argument that follows it (two program items per occurrence).
        i = 0
        p = self.program
        while i < len(p):
            if p[i] in ['hintmask', 'cntrmask']:
                assert i + 1 <= len(p)
                del p[i:i+2]
                continue
            i += 1

    # A charstring must never end up empty.
    assert len(self.program)

    del self._hints
|
2013-08-14 18:18:51 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that records which local/global subroutines are
    actually reached, accumulating their (biased) indices in a ``_used``
    set attached to each subroutine index object."""

    def __init__(self, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
        # Make sure both subr indexes carry an accumulator for used indices.
        for subr_index in (localSubrs, globalSubrs):
            if subr_index and not hasattr(subr_index, "_used"):
                subr_index._used = set()

    def op_callsubr(self, index):
        # Mark the local subroutine as used, then let the base class recurse.
        self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        # Mark the global subroutine as used, then let the base class recurse.
        self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
|
2013-09-10 14:33:19 -04:00
|
|
|
|
2017-01-26 12:09:37 +00:00
|
|
|
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
    """T2 decompiler that analyzes charstrings (and their subroutines,
    recursively) to decide what must be removed to drop hinting.

    For every executed charstring it attaches a ``Hints`` record describing
    where the hint section ends, whether hintmask operators occur, and which
    subroutine calls become empty after dehinting.  Charstrings that need
    editing are collected into the ``css`` set passed to the constructor;
    the actual edits are performed later by ``T2CharString.drop_hints``.
    """

    class Hints(object):
        # Per-charstring hint analysis results.
        def __init__(self):
            # Whether calling this charstring produces any hint stems
            # Note that if a charstring starts with hintmask, it will
            # have has_hint set to True, because it *might* produce an
            # implicit vstem if called under certain conditions.
            self.has_hint = False
            # Index to start at to drop all hints
            self.last_hint = 0
            # Index up to which we know more hints are possible.
            # Only relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints
            #    continuing after this
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive
            self.has_hintmask = False
            # List of indices of calls to empty subroutines to remove.
            self.deletions = []
        pass

    def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX):
        # css: set collecting charstrings that will need drop_hints().
        self._css = css
        psCharStrings.T2WidthExtractor.__init__(
            self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)

    def execute(self, charString):
        # Re-running a charstring must reproduce the same analysis; keep the
        # previous result (if any) to verify that at the end.
        old_hints = charString._hints if hasattr(charString, '_hints') else None
        charString._hints = self.Hints()

        psCharStrings.T2WidthExtractor.execute(self, charString)

        hints = charString._hints

        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)

        if hints.status != 2:
            # Check from last_check, make sure we didn't have any operators.
            for i in range(hints.last_checked, len(charString.program) - 1):
                if isinstance(charString.program[i], str):
                    hints.status = 2
                    break
            else:
                hints.status = 1 # There's *something* here
            hints.last_checked = len(charString.program)

        if old_hints:
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        # Resolve the subroutine before the base class pops the operand.
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.T2WidthExtractor.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
        self.processSubr(index, subr)

    # Stem-declaring operators: record that hints were seen at this index.
    def op_hstem(self, index):
        psCharStrings.T2WidthExtractor.op_hstem(self, index)
        self.processHint(index)
    def op_vstem(self, index):
        psCharStrings.T2WidthExtractor.op_vstem(self, index)
        self.processHint(index)
    def op_hstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
        self.processHint(index)
    def op_vstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
        self.processHint(index)

    def op_hintmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
        self.processHintmask(index)
        return rv
    def op_cntrmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
        self.processHintmask(index)
        return rv

    def processHintmask(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2:
            # Check from last_check, see if we may be an implicit vstem
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            else:
                # We are an implicit vstem
                hints.has_hint = True
                hints.last_hint = index + 1
                hints.status = 0
            hints.last_checked = index + 1

    def processHint(self, index):
        # A real stem operator: everything up to here is hint declaration.
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        # Fold the (already computed) analysis of a called subroutine into
        # the calling charstring's analysis.
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints

        # Check from last_check, make sure we didn't have
        # any operators.
        if hints.status != 2:
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            hints.last_checked = index

        if hints.status != 2:
            if subr_hints.has_hint:
                hints.has_hint = True

                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2 # Leave the subr call in
            elif subr_hints.status == 0:
                # Calling an (after-dehinting) empty subroutine: schedule
                # the [subr#, call*subr] pair for deletion.
                hints.deletions.append(index)

        hints.status = max(hints.status, subr_hints.status)
|
2013-08-14 18:18:51 -04:00
|
|
|
|
2014-06-20 15:30:26 -07:00
|
|
|
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that computes, for each charstring, a fully inlined
    program (all callsubr/callgsubr expanded), stored as
    ``charString._desubroutinized``."""

    def __init__(self, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self,
                                                  localSubrs,
                                                  globalSubrs)

    def execute(self, charString):
        # Note: Currently we recompute _desubroutinized each time.
        # This is more robust in some cases, but in other places we assume
        # that each subroutine always expands to the same code, so
        # maybe it doesn't matter. To speed up we can just not
        # recompute _desubroutinized if it's there. For now I just
        # double-check that it desubroutinized to the same thing.
        old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None

        # _patches collects (index, expansion) pairs recorded by processSubr.
        charString._patches = []
        psCharStrings.SimpleT2Decompiler.execute(self, charString)
        desubroutinized = charString.program[:]
        # Apply patches in reverse so earlier indices stay valid.
        for idx,expansion in reversed (charString._patches):
            assert idx >= 2
            assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
            assert type(desubroutinized[idx - 2]) == int
            # Drop the trailing 'return' of the inlined subroutine.
            if expansion[-1] == 'return':
                expansion = expansion[:-1]
            # Replace the [subr#, call*subr] pair with the expansion.
            desubroutinized[idx-2:idx] = expansion
        if 'endchar' in desubroutinized:
            # Cut off after first endchar
            desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
        else:
            if not len(desubroutinized) or desubroutinized[-1] != 'return':
                desubroutinized.append('return')

        charString._desubroutinized = desubroutinized
        del charString._patches

        if old_desubroutinized:
            assert desubroutinized == old_desubroutinized

    def op_callsubr(self, index):
        # Resolve the subroutine before the base class pops the operand.
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def processSubr(self, index, subr):
        # Record the call site and the subroutine's own (already computed)
        # desubroutinized body for later splicing in execute().
        cs = self.callingStack[-1]
        cs._patches.append((index, subr._desubroutinized))
|
2014-06-20 15:30:26 -07:00
|
|
|
|
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('CFF '))
def prune_post_subset(self, options):
    """Optimize the CFF table after glyph subsetting.

    Per font: drops unused FontDicts (CID-keyed fonts), optionally
    desubroutinizes, optionally drops glyph and font-wide hinting, and
    renumbers local/global subroutines to remove unused ones.
    """
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Drop unused FontDictionaries
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            indices = _uniq_sort(sel.gidArray)
            sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in indices]
            del arr.file, arr.offsets

        # Desubroutinize if asked for
        if options.desubroutinize:
            for g in font.charset:
                c, _ = cs.getItemAndSelector(g)
                c.decompile()
                subrs = getattr(c.private, "Subrs", [])
                decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs)
                decompiler.execute(c)
                c.program = c._desubroutinized

        # Drop hints if not needed
        if not options.hinting:

            # This can be tricky, but doesn't have to. What we do is:
            #
            # - Run all used glyph charstrings and recurse into subroutines,
            # - For each charstring (including subroutines), if it has any
            #   of the hint stem operators, we mark it as such.
            #   Upon returning, for each charstring we note all the
            #   subroutine calls it makes that (recursively) contain a stem,
            # - Dropping hinting then consists of the following two ops:
            #   * Drop the piece of the program in each charstring before the
            #     last call to a stem op or a stem-calling subroutine,
            #   * Drop all hintmask operations.
            # - It's trickier... A hintmask right after hints and a few numbers
            #   will act as an implicit vstemhm. As such, we track whether
            #   we have seen any non-hint operators so far and do the right
            #   thing, recursively... Good luck understanding that :(
            css = set()
            for g in font.charset:
                c, _ = cs.getItemAndSelector(g)
                c.decompile()
                subrs = getattr(c.private, "Subrs", [])
                decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
                                                    c.private.nominalWidthX,
                                                    c.private.defaultWidthX)
                decompiler.execute(c)
                c.width = decompiler.width
            for charstring in css:
                charstring.drop_hints()
            del css

            # Drop font-wide hinting values
            all_privs = []
            if hasattr(font, 'FDSelect'):
                all_privs.extend(fd.Private for fd in font.FDArray)
            else:
                all_privs.append(font.Private)
            for priv in all_privs:
                for k in ['BlueValues', 'OtherBlues',
                          'FamilyBlues', 'FamilyOtherBlues',
                          'BlueScale', 'BlueShift', 'BlueFuzz',
                          'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']:
                    if hasattr(priv, k):
                        setattr(priv, k, None)

        # Renumber subroutines to remove unused ones

        # Mark all used subroutines
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs)
            decompiler.execute(c)

        all_subrs = [font.GlobalSubrs]
        if hasattr(font, 'FDSelect'):
            all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
        elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)

        # NOTE(review): this rebinds the leftover loop variable 'subrs' and
        # is immediately overwritten by the loop below — looks like dead
        # code; confirm against upstream history before removing.
        subrs = set(subrs) # Remove duplicates

        # Prepare
        for subrs in all_subrs:
            if not hasattr(subrs, '_used'):
                subrs._used = set()
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = psCharStrings.calcSubrBias(subrs)
            subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)

        # Renumber glyph charstrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            c.subset_subroutines (subrs, font.GlobalSubrs)

        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                # Global subrs may still reference the (single) local subr
                # index of a non-CID font.
                if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'):
                    local_subrs = font.Private.Subrs
                else:
                    local_subrs = []
            else:
                local_subrs = subrs

            subrs.items = [subrs.items[i] for i in subrs._used]
            if hasattr(subrs, 'file'):
                del subrs.file
            if hasattr(subrs, 'offsets'):
                del subrs.offsets

            for subr in subrs.items:
                subr.subset_subroutines (local_subrs, font.GlobalSubrs)

        # Cleanup
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias

    return True
|
2013-07-23 13:37:13 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('cmap'))
def closure_glyphs(self, s):
    """Add to ``s.glyphs`` every glyph mapped from a requested Unicode
    codepoint, and compute ``s.unicodes_missing`` (requested codepoints not
    covered by any Unicode subtable)."""
    tables = [t for t in self.tables if t.isUnicode()]

    # Close glyphs
    for table in tables:
        if table.format == 14:
            # Unicode Variation Sequences: pull in non-default mappings for
            # requested codepoints (a None glyph means "use default UVS").
            for cmap in table.uvsDict.values():
                glyphs = {g for u,g in cmap if u in s.unicodes_requested}
                if None in glyphs:
                    glyphs.remove(None)
                s.glyphs.update(glyphs)
        else:
            cmap = table.cmap
            intersection = s.unicodes_requested.intersection(cmap.keys())
            s.glyphs.update(cmap[u] for u in intersection)

    # Calculate unicodes_missing
    # (relies on every subtable, including format 14, exposing a .cmap
    # mapping — presumably empty for format 14; verify in cmap table impl)
    s.unicodes_missing = s.unicodes_requested.copy()
    for table in tables:
        s.unicodes_missing.difference_update(table.cmap)
|
2013-07-31 19:47:37 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('cmap'))
def prune_pre_subset(self, font, options):
    """Discard cmap subtables the subsetter should not keep, per options."""
    kept = self.tables
    if not options.legacy_cmap:
        # Drop non-Unicode / non-Symbol cmaps
        kept = [t for t in kept if t.isUnicode() or t.isSymbol()]
    if not options.symbol_cmap:
        kept = [t for t in kept if not t.isSymbol()]
    # TODO(behdad) Only keep one subtable?
    # For now, drop format=0 which can't be subset_glyphs easily?
    kept = [t for t in kept if t.format != 0]
    self.tables = kept
    self.numSubTables = len(kept)
    return True # Required table
|
2013-07-23 12:58:37 -04:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('cmap'))
def subset_glyphs(self, s):
    """Filter every cmap subtable down to the requested glyphs/codepoints,
    then drop subtables that became empty."""
    s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only
    for t in self.tables:
        if t.format == 14:
            # TODO(behdad) We drop all the default-UVS mappings
            # for glyphs_requested. So it's the caller's responsibility to make
            # sure those are included.
            t.uvsDict = {v:[(u,g) for u,g in l
                            if g in s.glyphs_requested or u in s.unicodes_requested]
                         for v,l in t.uvsDict.items()}
            # Remove variation selectors whose mapping list became empty.
            t.uvsDict = {v:l for v,l in t.uvsDict.items() if l}
        elif t.isUnicode():
            t.cmap = {u:g for u,g in t.cmap.items()
                      if g in s.glyphs_requested or u in s.unicodes_requested}
        else:
            # Non-Unicode (e.g. symbol/legacy) subtables: keep by glyph only.
            t.cmap = {u:g for u,g in t.cmap.items()
                      if g in s.glyphs_requested}
    self.tables = [t for t in self.tables
                   if (t.cmap if t.format != 14 else t.uvsDict)]
    self.numSubTables = len(self.tables)
    # TODO(behdad) Convert formats when needed.
    # In particular, if we have a format=12 without non-BMP
    # characters, either drop format=12 one or convert it
    # to format=4 if there's not one.
    return True # Required table
|
2013-07-23 11:03:49 -04:00
|
|
|
|
2014-12-09 19:00:17 -08:00
|
|
|
@_add_method(ttLib.getTableClass('DSIG'))
def prune_pre_subset(self, font, options):
    """Empty the digital-signature table.

    Subsetting modifies the font data, so existing signatures would no
    longer validate; drop them all but keep the table itself.
    """
    # Drop all signatures since they will be invalid
    self.signatureRecords = []
    self.usNumSigs = 0
    return True
|
2014-12-09 19:00:17 -08:00
|
|
|
|
2014-12-09 18:53:20 -08:00
|
|
|
@_add_method(ttLib.getTableClass('maxp'))
def prune_pre_subset(self, font, options):
    """When hinting is being dropped, zero out the TrueType-only maxp
    fields that describe hinting resource usage."""
    if options.hinting:
        return True
    # Only maxp version 1.0 (TrueType outlines) carries these fields.
    if self.tableVersion == 0x00010000:
        self.maxZones = 1
        for field in ('maxTwilightPoints', 'maxStorage',
                      'maxFunctionDefs', 'maxInstructionDefs',
                      'maxStackElements', 'maxSizeOfInstructions'):
            setattr(self, field, 0)
    return True
|
2014-12-09 18:53:20 -08:00
|
|
|
|
2013-09-19 20:36:49 -04:00
|
|
|
@_add_method(ttLib.getTableClass('name'))
def prune_pre_subset(self, font, options):
    """Filter name records by ID, platform and language per options, keeping
    any nameIDs referenced from 'fvar' (variable-font axes/instances), and
    optionally obfuscate family/style names for webfont use."""
    nameIDs = set(options.name_IDs)
    fvar = font.get('fvar')
    if fvar:
        # Keep names referenced by variable-font axes and named instances.
        nameIDs.update([axis.axisNameID for axis in fvar.axes])
        nameIDs.update([inst.subfamilyNameID for inst in fvar.instances])
        nameIDs.update([inst.postscriptNameID for inst in fvar.instances
                        if inst.postscriptNameID != 0xFFFF])
    if '*' not in options.name_IDs:
        self.names = [n for n in self.names if n.nameID in nameIDs]
    if not options.name_legacy:
        # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman
        # entry for Latin and no Unicode names.
        self.names = [n for n in self.names if n.isUnicode()]
    # TODO(behdad) Option to keep only one platform's
    if '*' not in options.name_languages:
        # TODO(behdad) This is Windows-platform specific!
        self.names = [n for n in self.names
                      if n.langID in options.name_languages]
    if options.obfuscate_names:
        # Replace identifying names with placeholder strings so the webfont
        # cannot be installed/used as a system font.
        namerecs = []
        for n in self.names:
            if n.nameID in [1, 4]:
                n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f"
            elif n.nameID in [2, 6]:
                n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f"
            elif n.nameID == 3:
                n.string = ""
            elif n.nameID in [16, 17, 18]:
                continue
            namerecs.append(n)
        self.names = namerecs
    return True # Required table
|
2013-07-22 15:17:12 -04:00
|
|
|
|
2013-07-22 15:06:23 -04:00
|
|
|
|
2016-01-27 16:43:55 +00:00
|
|
|
# TODO(behdad) OS/2 ulCodePageRange?
|
2013-10-26 22:03:35 +02:00
|
|
|
# TODO(behdad) Drop AAT tables.
|
2013-08-13 20:16:16 -04:00
|
|
|
# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries.
|
2013-08-29 18:19:22 -04:00
|
|
|
# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left
|
|
|
|
# TODO(behdad) Drop GDEF subitems if unused by lookups
|
2013-08-14 19:55:24 -04:00
|
|
|
# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF)
|
2013-08-13 20:16:16 -04:00
|
|
|
# TODO(behdad) Text direction considerations.
|
|
|
|
# TODO(behdad) Text script / language considerations.
|
2013-11-26 22:53:04 -05:00
|
|
|
# TODO(behdad) Optionally drop 'kern' table if GPOS available
|
2013-12-15 22:02:20 -05:00
|
|
|
# TODO(behdad) Implement --unicode='*' to choose all cmap'ed
|
|
|
|
# TODO(behdad) Drop old-spec Indic scripts
|
|
|
|
|
2013-07-24 13:34:47 -04:00
|
|
|
|
2013-08-13 20:57:59 -04:00
|
|
|
class Options(object):
|
|
|
|
|
2015-05-07 10:40:29 +02:00
|
|
|
class OptionError(Exception): pass
|
|
|
|
class UnknownOptionError(OptionError): pass
|
|
|
|
|
2015-09-23 14:34:02 +01:00
|
|
|
# spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser
|
2015-05-07 10:40:29 +02:00
|
|
|
_drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC',
|
2015-09-23 14:34:02 +01:00
|
|
|
'EBSC', 'SVG', 'PCLT', 'LTSH']
|
2015-05-07 10:40:29 +02:00
|
|
|
_drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite
|
2015-10-23 13:08:04 -07:00
|
|
|
_drop_tables_default += ['sbix'] # Color
|
2016-04-08 18:41:31 -07:00
|
|
|
_no_subset_tables_default = ['avar', 'fvar',
|
|
|
|
'gasp', 'head', 'hhea', 'maxp',
|
2015-09-23 14:34:02 +01:00
|
|
|
'vhea', 'OS/2', 'loca', 'name', 'cvt',
|
2017-04-14 15:27:52 -07:00
|
|
|
'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL',
|
|
|
|
'MVAR', 'STAT']
|
2016-03-31 16:23:24 +02:00
|
|
|
_hinting_tables_default = ['cvar', 'cvt', 'fpgm', 'prep', 'hdmx', 'VDMX']
|
2015-05-07 10:40:29 +02:00
|
|
|
|
|
|
|
# Based on HarfBuzz shapers
|
|
|
|
_layout_features_groups = {
|
|
|
|
# Default shaper
|
2017-08-07 21:48:11 -07:00
|
|
|
'common': ['rvrn', 'ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
|
|
|
|
'fractions': ['frac', 'numr', 'dnom'],
|
2015-05-07 10:40:29 +02:00
|
|
|
'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
|
|
|
|
'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
|
|
|
|
'ltr': ['ltra', 'ltrm'],
|
|
|
|
'rtl': ['rtla', 'rtlm'],
|
|
|
|
# Complex shapers
|
|
|
|
'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
|
2015-11-05 14:31:38 -08:00
|
|
|
'cswh', 'mset', 'stch'],
|
2015-05-07 10:40:29 +02:00
|
|
|
'hangul': ['ljmo', 'vjmo', 'tjmo'],
|
|
|
|
'tibetan': ['abvs', 'blws', 'abvm', 'blwm'],
|
|
|
|
'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
|
|
|
|
'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
|
|
|
|
'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
|
|
|
|
}
|
|
|
|
_layout_features_default = _uniq_sort(sum(
|
|
|
|
iter(_layout_features_groups.values()), []))
|
|
|
|
|
|
|
|
def __init__(self, **kwargs):
	"""Initialize every option to its default value, then apply any
	keyword-argument overrides via self.set() (unknown names raise
	UnknownOptionError there)."""

	# Copy the class-level default lists so per-instance mutation
	# (e.g. '--drop-tables+=') never leaks into other instances.
	self.drop_tables = self._drop_tables_default[:]
	self.no_subset_tables = self._no_subset_tables_default[:]
	self.passthrough_tables = False # keep/drop tables we can't subset
	self.hinting_tables = self._hinting_tables_default[:]
	self.legacy_kern = False # drop 'kern' table if GPOS available
	self.layout_features = self._layout_features_default[:]
	self.ignore_missing_glyphs = False
	self.ignore_missing_unicodes = True
	self.hinting = True
	self.glyph_names = False
	self.legacy_cmap = False
	self.symbol_cmap = False
	self.name_IDs = [1, 2] # Family and Style
	self.name_legacy = False
	self.name_languages = [0x0409] # English
	self.obfuscate_names = False # to make webfont unusable as a system font
	self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF
	self.notdef_outline = False # No need for notdef to have an outline really
	self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType
	self.recalc_bounds = False # Recalculate font bounding boxes
	self.recalc_timestamp = False # Recalculate font modified timestamp
	self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits
	self.recalc_average_width = False # update 'xAvgCharWidth'
	self.canonical_order = None # Order tables as recommended
	self.flavor = None # May be 'woff' or 'woff2'
	self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0
	self.desubroutinize = False # Desubroutinize CFF CharStrings
	self.verbose = False
	self.timing = False
	self.xml = False

	self.set(**kwargs)
|
|
|
|
|
|
|
|
def set(self, **kwargs):
	"""Assign option values from keyword arguments.

	Every keyword must name an existing option attribute; an
	unrecognized name raises UnknownOptionError instead of silently
	creating a new attribute.
	"""
	for name, value in kwargs.items():
		if hasattr(self, name):
			setattr(self, name, value)
		else:
			raise self.UnknownOptionError("Unknown option '%s'" % name)
|
|
|
|
|
|
|
|
def parse_opts(self, argv, ignore_unknown=False):
	"""Parse command-line style options ("--name=value", "--no-name",
	"--name+=v", "--name-=v", "--name?") into attributes on self.

	Returns the arguments that were not consumed: positional arguments
	plus any unknown options whose dashed name is listed in
	*ignore_unknown* (or all unknown options when ignore_unknown is
	True), preserved verbatim for a later parsing pass.
	"""
	posargs = []
	passthru_options = []
	for a in argv:
		orig_a = a
		if not a.startswith('--'):
			# Not an option; keep as a positional argument.
			posargs.append(a)
			continue
		a = a[2:]
		i = a.find('=')
		op = '='
		if i == -1:
			# Valueless forms: "--no-foo", "--foo", and query "--foo?".
			if a.startswith("no-"):
				k = a[3:]
				if k == "canonical-order":
					# reorderTables=None is faster than False (the latter
					# still reorders to "keep" the original table order)
					v = None
				else:
					v = False
			else:
				k = a
				v = True
				if k.endswith("?"):
					k = k[:-1]
					v = '?'
		else:
			# Valued forms: "--foo=v", "--foo+=v", "--foo-=v".
			k = a[:i]
			if k[-1] in "-+":
				op = k[-1]+'=' # Op is '-=' or '+=' now.
				k = k[:-1]
			v = a[i+1:]
		ok = k
		# Dashes on the command line map to underscores in attribute names.
		k = k.replace('-', '_')
		if not hasattr(self, k):
			if ignore_unknown is True or ok in ignore_unknown:
				passthru_options.append(orig_a)
				continue
			else:
				raise self.UnknownOptionError("Unknown option '%s'" % a)

		ov = getattr(self, k)
		if v == '?':
			# Query form: report the current value, change nothing.
			print("Current setting for '%s' is: %s" % (ok, ov))
			continue
		# Coerce the new value to the type of the option's current value.
		if isinstance(ov, bool):
			v = bool(v)
		elif isinstance(ov, int):
			v = int(v)
		elif isinstance(ov, str):
			v = str(v) # redundant
		elif isinstance(ov, list):
			if isinstance(v, bool):
				raise self.OptionError("Option '%s' requires values to be specified using '='" % a)
			# List values may be comma- or space-separated; numeric-looking
			# entries are parsed as ints with auto-detected base.
			vv = v.replace(',', ' ').split()
			if vv == ['']:
				vv = []
			vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
			if op == '=':
				v = vv
			elif op == '+=':
				v = ov
				v.extend(vv)
			elif op == '-=':
				v = ov
				for x in vv:
					if x in v:
						v.remove(x)
			else:
				assert False

		setattr(self, k, v)

	return posargs + passthru_options
|
2013-08-13 19:50:38 -04:00
|
|
|
|
|
|
|
|
2013-08-13 20:57:59 -04:00
|
|
|
class Subsetter(object):

	"""Performs the subsetting of a TTFont object.

	Typical use: construct with an Options instance, call populate()
	with the requested glyphs/gids/unicodes/text, then subset(font).
	"""

	class SubsettingError(Exception): pass
	class MissingGlyphsSubsettingError(SubsettingError): pass
	class MissingUnicodesSubsettingError(SubsettingError): pass

	def __init__(self, options=None):
		"""Create a subsetter; *options* defaults to a fresh Options()."""

		if not options:
			options = Options()

		self.options = options
		self.unicodes_requested = set()     # codepoints to retain
		self.glyph_names_requested = set()  # glyph names to retain
		self.glyph_ids_requested = set()    # glyph ids to retain

	def populate(self, glyphs=None, gids=None, unicodes=None, text=""):
		"""Add glyph names, glyph ids, codepoints, and/or the characters
		of *text* to the subset request.  May be called multiple times.

		Fix: the defaults used to be mutable lists ([]); they are now
		None sentinels — behaviorally identical for all callers, but
		avoids the shared-mutable-default pitfall.
		"""
		self.unicodes_requested.update(unicodes if unicodes is not None else [])
		if isinstance(text, bytes):
			text = text.decode("utf_8")
		# Encode to fixed-width UTF-32 so every codepoint (including
		# non-BMP ones) occupies exactly one 4-byte unit.
		text_utf32 = text.encode("utf-32-be")
		nchars = len(text_utf32)//4
		for u in struct.unpack('>%dL' % nchars, text_utf32):
			self.unicodes_requested.add(u)
		self.glyph_names_requested.update(glyphs if glyphs is not None else [])
		self.glyph_ids_requested.update(gids if gids is not None else [])

	def _prune_pre_subset(self, font):
		"""First pass: drop whole tables per options, then give each
		remaining table a chance to prune glyph-independent data."""
		for tag in self._sort_tables(font):
			# Tag comparisons use .strip() because 4-char table tags are
			# space-padded (e.g. 'cvt ').
			if(tag.strip() in self.options.drop_tables or
			   (tag.strip() in self.options.hinting_tables and not self.options.hinting) or
			   (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))):
				log.info("%s dropped", tag)
				del font[tag]
				continue

			clazz = ttLib.getTableClass(tag)

			if hasattr(clazz, 'prune_pre_subset'):
				with timer("load '%s'" % tag):
					table = font[tag]
				with timer("prune '%s'" % tag):
					retain = table.prune_pre_subset(font, self.options)
				if not retain:
					log.info("%s pruned to empty; dropped", tag)
					del font[tag]
					continue
				else:
					log.info("%s pruned", tag)

	def _close_over_table(self, font, tag, realGlyphs):
		# Shared helper for the GSUB/MATH/COLR/glyf closure passes below:
		# close self.glyphs over font[tag] (with the same logging the
		# previously-duplicated inline code produced) and return a frozen
		# snapshot of the result.
		with timer("close glyph list over '%s'" % tag):
			log.info("Closing glyph list over '%s': %d glyphs before",
			         tag, len(self.glyphs))
			log.glyphs(self.glyphs, font=font)
			font[tag].closure_glyphs(self)
			self.glyphs.intersection_update(realGlyphs)
			log.info("Closed glyph list over '%s': %d glyphs after",
			         tag, len(self.glyphs))
			log.glyphs(self.glyphs, font=font)
		return frozenset(self.glyphs)

	def _closure_glyphs(self, font):
		"""Expand the requested glyph set with everything it reaches via
		cmap, GSUB, MATH, COLR and glyf composites; the final frozen set
		is stored in self.glyphs_all (plus per-stage snapshots)."""

		realGlyphs = set(font.getGlyphOrder())
		glyph_order = font.getGlyphOrder()

		# Resolve requested glyph ids to names; out-of-range ids are not
		# resolved here — they are reported as missing just below.
		self.glyphs_requested = set()
		self.glyphs_requested.update(self.glyph_names_requested)
		self.glyphs_requested.update(glyph_order[i]
		                             for i in self.glyph_ids_requested
		                             if i < len(glyph_order))

		self.glyphs_missing = set()
		self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs))
		self.glyphs_missing.update(i for i in self.glyph_ids_requested
		                           if i >= len(glyph_order))
		if self.glyphs_missing:
			log.info("Missing requested glyphs: %s", self.glyphs_missing)
			if not self.options.ignore_missing_glyphs:
				raise self.MissingGlyphsSubsettingError(self.glyphs_missing)

		self.glyphs = self.glyphs_requested.copy()

		# The cmap closure maps requested unicodes to glyphs; codepoints
		# the font cannot represent end up in self.unicodes_missing.
		self.unicodes_missing = set()
		if 'cmap' in font:
			with timer("close glyph list over 'cmap'"):
				font['cmap'].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
			self.glyphs_cmaped = frozenset(self.glyphs)
			if self.unicodes_missing:
				missing = ["U+%04X" % u for u in self.unicodes_missing]
				log.info("Missing glyphs for requested Unicodes: %s", missing)
				if not self.options.ignore_missing_unicodes:
					raise self.MissingUnicodesSubsettingError(missing)
				del missing

		if self.options.notdef_glyph:
			if 'glyf' in font:
				self.glyphs.add(font.getGlyphName(0))
				log.info("Added gid0 to subset")
			else:
				self.glyphs.add('.notdef')
				log.info("Added .notdef to subset")
		if self.options.recommended_glyphs:
			if 'glyf' in font:
				for i in range(min(4, len(font.getGlyphOrder()))):
					self.glyphs.add(font.getGlyphName(i))
				log.info("Added first four glyphs to subset")

		# These four passes shared nearly identical inline code; it is now
		# factored into _close_over_table (same order, same logging).
		if 'GSUB' in font:
			self.glyphs_gsubed = self._close_over_table(font, 'GSUB', realGlyphs)

		if 'MATH' in font:
			self.glyphs_mathed = self._close_over_table(font, 'MATH', realGlyphs)

		if 'COLR' in font:
			self.glyphs_colred = self._close_over_table(font, 'COLR', realGlyphs)

		if 'glyf' in font:
			self.glyphs_glyfed = self._close_over_table(font, 'glyf', realGlyphs)

		self.glyphs_all = frozenset(self.glyphs)

		log.info("Retaining %d glyphs", len(self.glyphs_all))

		del self.glyphs

	def _subset_glyphs(self, font):
		"""Second pass: ask every table to subset itself down to
		self.glyphs_all, then rewrite the glyph order to match."""
		for tag in self._sort_tables(font):
			clazz = ttLib.getTableClass(tag)

			if tag.strip() in self.options.no_subset_tables:
				log.info("%s subsetting not needed", tag)
			elif hasattr(clazz, 'subset_glyphs'):
				with timer("subset '%s'" % tag):
					table = font[tag]
					self.glyphs = self.glyphs_all
					retain = table.subset_glyphs(self)
					del self.glyphs
				if not retain:
					log.info("%s subsetted to empty; dropped", tag)
					del font[tag]
				else:
					log.info("%s subsetted", tag)
			elif self.options.passthrough_tables:
				log.info("%s NOT subset; don't know how to subset", tag)
			else:
				log.info("%s NOT subset; don't know how to subset; dropped", tag)
				del font[tag]

		with timer("subset GlyphOrder"):
			glyphOrder = font.getGlyphOrder()
			glyphOrder = [g for g in glyphOrder if g in self.glyphs_all]
			font.setGlyphOrder(glyphOrder)
			font._buildReverseGlyphOrderDict()

	def _prune_post_subset(self, font):
		"""Final pass: per-table cleanup after glyphs have been dropped."""
		for tag in font.keys():
			if tag == 'GlyphOrder': continue
			if tag == 'OS/2':
				# Fix: the xAvgCharWidth recalculation appeared to be gated
				# under the prune_unicode_ranges condition as well, so
				# --recalc-average-width silently did nothing when
				# --no-prune-unicode-ranges was given.  The two options are
				# independent and are treated as such here.
				if self.options.prune_unicode_ranges:
					old_uniranges = font[tag].getUnicodeRanges()
					new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True)
					if old_uniranges != new_uniranges:
						log.info("%s Unicode ranges pruned: %s", tag, sorted(new_uniranges))
				if self.options.recalc_average_width:
					# xAvgCharWidth is the average of all positive advance
					# widths in 'hmtx'.
					widths = [m[0] for m in font["hmtx"].metrics.values() if m[0] > 0]
					avg_width = int(round(sum(widths) / len(widths)))
					if avg_width != font[tag].xAvgCharWidth:
						font[tag].xAvgCharWidth = avg_width
						log.info("%s xAvgCharWidth updated: %d", tag, avg_width)
			clazz = ttLib.getTableClass(tag)
			if hasattr(clazz, 'prune_post_subset'):
				with timer("prune '%s'" % tag):
					table = font[tag]
					retain = table.prune_post_subset(self.options)
				if not retain:
					log.info("%s pruned to empty; dropped", tag)
					del font[tag]
				else:
					log.info("%s pruned", tag)

	def _sort_tables(self, font):
		"""Return the font's table tags minus the synthetic 'GlyphOrder'
		entry, with the tags listed in tagOrder sorted after all other
		tags (in the listed order); remaining tags keep their relative
		order (sorted() is stable)."""
		tagOrder = ['fvar', 'avar', 'gvar', 'name', 'glyf']
		tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)}
		tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0))
		return [t for t in tags if t != 'GlyphOrder']

	def subset(self, font):
		"""Subset *font* in place according to the populated request."""
		self._prune_pre_subset(font)
		self._closure_glyphs(font)
		self._subset_glyphs(font)
		self._prune_post_subset(font)
|
2013-07-31 20:16:24 -04:00
|
|
|
|
2013-08-01 12:05:26 -04:00
|
|
|
|
2016-01-24 16:13:31 +00:00
|
|
|
@timer("load font")
|
2013-08-15 12:17:21 -04:00
|
|
|
def load_font(fontFile,
	      options,
	      allowVID=False,
	      checkChecksums=False,
	      dontLoadGlyphNames=False,
	      lazy=True):
	"""Load a TTFont from *fontFile*, honoring the recalc_bounds and
	recalc_timestamp options.

	When *dontLoadGlyphNames* is true, the 'post' table is prevented
	from decoding format-2.0 glyph names (expensive to parse, and not
	needed when the subset is specified by unicode/gid).
	"""

	font = ttLib.TTFont(fontFile,
			    allowVID=allowVID,
			    checkChecksums=checkChecksums,
			    recalcBBoxes=options.recalc_bounds,
			    recalcTimestamp=options.recalc_timestamp,
			    lazy=lazy)

	# Hack:
	#
	# If we don't need glyph names, change 'post' class to not try to
	# load them. It avoid lots of headache with broken fonts as well
	# as loading time.
	#
	# Ideally ttLib should provide a way to ask it to skip loading
	# glyph names. But it currently doesn't provide such a thing.
	#
	if dontLoadGlyphNames:
		post = ttLib.getTableClass('post')
		saved = post.decode_format_2_0
		post.decode_format_2_0 = post.decode_format_3_0
		try:
			f = font['post']
			if f.formatType == 2.0:
				f.formatType = 3.0
		finally:
			# Fix: restore the monkey-patched class method even if
			# loading the 'post' table raises, so later loads in the
			# same process are not silently affected.
			post.decode_format_2_0 = saved

	return font
|
2013-08-13 12:20:59 -04:00
|
|
|
|
2016-01-24 16:13:31 +00:00
|
|
|
@timer("compile and save font")
|
2013-08-16 12:42:34 -04:00
|
|
|
def save_font(font, outfile, options):
	"""Write *font* to *outfile*, honoring the flavor ('woff'/'woff2'),
	zopfli-compression, and table-ordering options."""
	flavor = options.flavor
	if flavor and not hasattr(font, 'flavor'):
		raise Exception("fonttools version does not support flavors.")
	if flavor == "woff" and options.with_zopfli:
		# Route WOFF 1.0 compression through zopfli for smaller output.
		from fontTools.ttLib import sfnt
		sfnt.USE_ZOPFLI = True
	font.flavor = flavor
	font.save(outfile, reorderTables=options.canonical_order)
|
2013-08-15 12:09:55 -04:00
|
|
|
|
2014-08-13 19:30:03 -04:00
|
|
|
def parse_unicodes(s):
	"""Parse a string of hex Unicode codepoints and codepoint ranges
	(e.g. "U+0041,U+0061-007A" or "41 61-7a") into a list of ints."""
	import re
	# First strip "0x"/"0X" prefixes, then turn all separator and
	# decoration characters (U+, u, commas, etc.) into whitespace so the
	# remainder is bare hex tokens, possibly of the form "lo-hi".
	cleaned = re.sub(r"[<+>,;&#\\xXuU\n ]", " ", re.sub(r"0[xX]", " ", s))
	result = []
	for token in cleaned.split():
		bounds = token.split('-')
		if len(bounds) == 1:
			result.append(int(token, 16))
		else:
			lo, hi = bounds
			result.extend(range(int(lo, 16), int(hi, 16)+1))
	return result
|
2014-08-13 19:30:03 -04:00
|
|
|
|
2014-08-14 11:37:35 -04:00
|
|
|
def parse_gids(s):
	"""Parse comma/space-separated decimal glyph ids and id ranges
	(e.g. "1,4-6 9") into a list of ints."""
	result = []
	for token in s.replace(',', ' ').split():
		bounds = token.split('-')
		if len(bounds) == 1:
			result.append(int(bounds[0]))
		else:
			start, stop = int(bounds[0]), int(bounds[1])
			result.extend(range(start, stop+1))
	return result
|
2014-08-14 11:37:35 -04:00
|
|
|
|
|
|
|
def parse_glyphs(s):
	"""Split a comma- and/or whitespace-separated glyph-name list into
	a list of names."""
	normalized = s.replace(',', ' ')
	return normalized.split()
|
2014-08-14 11:37:35 -04:00
|
|
|
|
2016-02-04 12:17:01 +00:00
|
|
|
def usage():
	"""Print the short usage line and a --help pointer to stderr."""
	stream = sys.stderr
	print("usage:", __usage__, file=stream)
	print("Try pyftsubset --help for more information.\n", file=stream)
|
|
|
|
|
2016-01-24 16:13:31 +00:00
|
|
|
@timer("make one with everything (TOTAL TIME)")
|
2015-05-20 11:02:43 +01:00
|
|
|
def main(args=None):
	"""Command-line entry point for pyftsubset.

	*args* defaults to sys.argv[1:].  Returns 0 on --help, 1 on missing
	arguments, 2 on a bad option; returns None (exit status 0) after a
	successful subsetting run.
	"""
	from os.path import splitext
	from fontTools import configLogger

	if args is None:
		args = sys.argv[1:]

	if '--help' in args:
		print(__doc__)
		return 0

	# First parsing pass: Options consumes the generic "--name=value"
	# options and passes through the glyph/unicode-selection ones, which
	# are handled positionally below.
	options = Options()
	try:
		args = options.parse_opts(args,
			ignore_unknown=['gids', 'gids-file',
							'glyphs', 'glyphs-file',
							'text', 'text-file',
							'unicodes', 'unicodes-file',
							'output-file'])
	except options.OptionError as e:
		usage()
		print("ERROR:", e, file=sys.stderr)
		return 2

	# Need at least the font file plus one glyph/selection argument.
	if len(args) < 2:
		usage()
		return 1

	configLogger(level=logging.INFO if options.verbose else logging.WARNING)
	if options.timing:
		timer.logger.setLevel(logging.DEBUG)
	else:
		timer.logger.disabled = True

	fontfile = args[0]
	args = args[1:]

	subsetter = Subsetter(options=options)
	# Default output name: <input>.subset.<ext>, unless --output-file=
	# overrides it below.
	basename, extension = splitext(fontfile)
	outfile = basename + '.subset' + extension
	glyphs = []
	gids = []
	unicodes = []
	wildcard_glyphs = False
	wildcard_unicodes = False
	text = ""
	# Second pass over the remaining arguments: collect the subset
	# specification (glyph names, gids, unicodes, text) from inline
	# values and from files; anything unrecognized is a glyph name.
	for g in args:
		if g == '*':
			wildcard_glyphs = True
			continue
		if g.startswith('--output-file='):
			outfile = g[14:]
			continue
		if g.startswith('--text='):
			text += g[7:]
			continue
		if g.startswith('--text-file='):
			text += open(g[12:], encoding='utf-8').read().replace('\n', '')
			continue
		if g.startswith('--unicodes='):
			if g[11:] == '*':
				wildcard_unicodes = True
			else:
				unicodes.extend(parse_unicodes(g[11:]))
			continue
		if g.startswith('--unicodes-file='):
			# '#' starts a comment in list files.
			for line in open(g[16:]).readlines():
				unicodes.extend(parse_unicodes(line.split('#')[0]))
			continue
		if g.startswith('--gids='):
			gids.extend(parse_gids(g[7:]))
			continue
		if g.startswith('--gids-file='):
			for line in open(g[12:]).readlines():
				gids.extend(parse_gids(line.split('#')[0]))
			continue
		if g.startswith('--glyphs='):
			if g[9:] == '*':
				wildcard_glyphs = True
			else:
				glyphs.extend(parse_glyphs(g[9:]))
			continue
		if g.startswith('--glyphs-file='):
			for line in open(g[14:]).readlines():
				glyphs.extend(parse_glyphs(line.split('#')[0]))
			continue
		glyphs.append(g)

	# Glyph names are only needed when requested by name.
	dontLoadGlyphNames = not options.glyph_names and not glyphs
	font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)

	with timer("compile glyph list"):
		if wildcard_glyphs:
			glyphs.extend(font.getGlyphOrder())
		if wildcard_unicodes:
			for t in font['cmap'].tables:
				if t.isUnicode():
					unicodes.extend(t.cmap.keys())
		assert '' not in glyphs

	log.info("Text: '%s'" % text)
	log.info("Unicodes: %s", unicodes)
	log.info("Glyphs: %s", glyphs)
	log.info("Gids: %s", gids)

	subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text)
	subsetter.subset(font)

	save_font(font, outfile, options)

	if options.verbose:
		import os
		log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile))
		log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile))

	if options.xml:
		font.saveXML(sys.stdout)

	font.close()
|
2013-08-13 20:13:33 -04:00
|
|
|
|
2013-08-22 18:10:17 -04:00
|
|
|
|
|
|
|
# Public API of this module.
__all__ = [
	'Options',
	'Subsetter',
	'load_font',
	'save_font',
	'parse_gids',
	'parse_glyphs',
	'parse_unicodes',
	'main'
]
|
|
|
|
|
2013-07-24 13:34:47 -04:00
|
|
|
# Script entry point: exit with main()'s return value (None -> status 0).
if __name__ == '__main__':
	sys.exit(main())
|