Merge pull request #334 from brawer/feaLib

Parser for OpenType feature files
Sascha Brawer 2015-08-17 20:13:53 +02:00
commit 6c623928be
17 changed files with 1439 additions and 0 deletions

Lib/fontTools/feaLib/__init__.py
@@ -0,0 +1,4 @@
"""fontTools.feaLib -- a package for dealing with OpenType feature files."""
# The structure of OpenType feature files is defined here:
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html
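
The package's entry point is the Parser added further down in this change. A minimal usage sketch, assuming the merged package is on the path ("example.fea" is a placeholder):

from fontTools.feaLib.parser import Parser

# Parser reads the file at the given path (so that include() statements
# can be resolved relative to it) and returns an ast.FeatureFile.
doc = Parser("example.fea").parse()
for statement in doc.statements:
    print(type(statement).__name__)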

Lib/fontTools/feaLib/ast.py
@@ -0,0 +1,98 @@
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
class FeatureFile(object):
def __init__(self):
self.statements = []
class FeatureBlock(object):
def __init__(self, location, name, use_extension):
self.location = location
self.name, self.use_extension = name, use_extension
self.statements = []
class LookupBlock(object):
def __init__(self, location, name, use_extension):
self.location = location
self.name, self.use_extension = name, use_extension
self.statements = []
class GlyphClassDefinition(object):
def __init__(self, location, name, glyphs):
self.location = location
self.name = name
self.glyphs = glyphs
class AlternateSubstitution(object):
def __init__(self, location, glyph, from_class):
self.location = location
self.glyph, self.from_class = (glyph, from_class)
class AnchorDefinition(object):
def __init__(self, location, name, x, y, contourpoint):
self.location = location
self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint
class LanguageStatement(object):
def __init__(self, location, language, include_default, required):
self.location = location
self.language = language
self.include_default = include_default
self.required = required
class LanguageSystemStatement(object):
def __init__(self, location, script, language):
self.location = location
self.script, self.language = (script, language)
class IgnoreSubstitutionRule(object):
def __init__(self, location, prefix, glyphs, suffix):
self.location = location
self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix)
class LookupReferenceStatement(object):
def __init__(self, location, lookup):
self.location, self.lookup = (location, lookup)
class ScriptStatement(object):
def __init__(self, location, script):
self.location = location
self.script = script
class SubtableStatement(object):
def __init__(self, location):
self.location = location
class SubstitutionRule(object):
def __init__(self, location, old, new):
self.location, self.old, self.new = (location, old, new)
self.old_prefix = []
self.old_suffix = []
self.lookups = [None] * len(old)
class ValueRecord(object):
def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance):
self.location = location
self.xPlacement, self.yPlacement = (xPlacement, yPlacement)
self.xAdvance, self.yAdvance = (xAdvance, yAdvance)
class ValueRecordDefinition(object):
def __init__(self, location, name, value):
self.location = location
self.name = name
self.value = value
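
These node classes are plain data holders. Every constructor takes a location first, a (path, line, column) tuple in the format the lexer produces. A hand-built sketch of the tree the parser yields for "feature smcp {substitute a by a.sc;} smcp;" (mirroring the parser tests below):

import fontTools.feaLib.ast as ast

loc = ("example.fea", 1, 1)  # placeholder (path, line, column)
feature = ast.FeatureBlock(loc, "smcp", use_extension=False)
rule = ast.SubstitutionRule(loc, old=[{"a"}], new=[{"a.sc"}])
feature.statements.append(rule)
assert rule.lookups == [None]  # one lookup slot per input glyph set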

Lib/fontTools/feaLib/lexer.py
@@ -0,0 +1,203 @@
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
import codecs
import os
class LexerError(Exception):
def __init__(self, message, location):
Exception.__init__(self, message)
self.location = location
def __str__(self):
message = Exception.__str__(self)
if self.location:
path, line, column = self.location
return "%s:%d:%d: %s" % (path, line, column, message)
else:
return message
class Lexer(object):
NUMBER = "NUMBER"
STRING = "STRING"
NAME = "NAME"
FILENAME = "FILENAME"
GLYPHCLASS = "GLYPHCLASS"
CID = "CID"
SYMBOL = "SYMBOL"
COMMENT = "COMMENT"
NEWLINE = "NEWLINE"
CHAR_WHITESPACE_ = " \t"
CHAR_NEWLINE_ = "\r\n"
CHAR_SYMBOL_ = ";:-+'{}[]<>()="
CHAR_DIGIT_ = "0123456789"
CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\"
CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_."
MODE_NORMAL_ = "NORMAL"
MODE_FILENAME_ = "FILENAME"
def __init__(self, text, filename):
self.filename_ = filename
self.line_ = 1
self.pos_ = 0
self.line_start_ = 0
self.text_ = text
self.text_length_ = len(text)
self.mode_ = Lexer.MODE_NORMAL_
def __iter__(self):
return self
def next(self): # Python 2
return self.__next__()
def __next__(self): # Python 3
while True:
token_type, token, location = self.next_()
if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}:
return (token_type, token, location)
def next_(self):
self.scan_over_(Lexer.CHAR_WHITESPACE_)
column = self.pos_ - self.line_start_ + 1
location = (self.filename_, self.line_, column)
start = self.pos_
text = self.text_
limit = len(text)
if start >= limit:
raise StopIteration()
cur_char = text[start]
next_char = text[start + 1] if start + 1 < limit else None
if cur_char == "\n":
self.pos_ += 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "\r":
self.pos_ += (2 if next_char == "\n" else 1)
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "#":
self.scan_until_(Lexer.CHAR_NEWLINE_)
return (Lexer.COMMENT, text[start:self.pos_], location)
if self.mode_ is Lexer.MODE_FILENAME_:
if cur_char != "(":
raise LexerError("Expected '(' before file name", location)
self.scan_until_(")")
cur_char = text[self.pos_] if self.pos_ < limit else None
if cur_char != ")":
raise LexerError("Expected ')' after file name", location)
self.pos_ += 1
self.mode_ = Lexer.MODE_NORMAL_
return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)
if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.CID, int(text[start + 1:self.pos_], 10), location)
if cur_char == "@":
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
glyphclass = text[start + 1:self.pos_]
if len(glyphclass) < 1:
raise LexerError("Expected glyph class name", location)
if len(glyphclass) > 30:
raise LexerError(
"Glyph class names must not be longer than 30 characters",
location)
return (Lexer.GLYPHCLASS, glyphclass, location)
if cur_char in Lexer.CHAR_NAME_START_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
token = text[start:self.pos_]
if token == "include":
self.mode_ = Lexer.MODE_FILENAME_
return (Lexer.NAME, token, location)
if cur_char == "0" and next_char in "xX":
self.pos_ += 2
self.scan_over_(Lexer.CHAR_HEXDIGIT_)
return (Lexer.NUMBER, int(text[start:self.pos_], 16), location)
if cur_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
if cur_char in Lexer.CHAR_SYMBOL_:
self.pos_ += 1
return (Lexer.SYMBOL, cur_char, location)
if cur_char == '"':
self.pos_ += 1
self.scan_until_('"\r\n')
if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
self.pos_ += 1
return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
else:
raise LexerError("Expected '\"' to terminate string", location)
raise LexerError("Unexpected character: '%s'" % cur_char, location)
def scan_over_(self, valid):
p = self.pos_
while p < self.text_length_ and self.text_[p] in valid:
p += 1
self.pos_ = p
def scan_until_(self, stop_at):
p = self.pos_
while p < self.text_length_ and self.text_[p] not in stop_at:
p += 1
self.pos_ = p
class IncludingLexer(object):
def __init__(self, filename):
self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))]
def __iter__(self):
return self
def next(self): # Python 2
return self.__next__()
def __next__(self): # Python 3
while self.lexers_:
lexer = self.lexers_[-1]
try:
token_type, token, location = lexer.next()
except StopIteration:
self.lexers_.pop()
continue
if token_type is Lexer.NAME and token == "include":
fname_type, fname_token, fname_location = lexer.next()
if fname_type is not Lexer.FILENAME:
raise LexerError("Expected file name", fname_location)
semi_type, semi_token, semi_location = lexer.next()
if semi_type is not Lexer.SYMBOL or semi_token != ";":
raise LexerError("Expected ';'", semi_location)
curpath, _ = os.path.split(lexer.filename_)
path = os.path.join(curpath, fname_token)
if len(self.lexers_) >= 5:
raise LexerError("Too many recursive includes",
fname_location)
self.lexers_.append(self.make_lexer_(path, fname_location))
continue
else:
return (token_type, token, location)
raise StopIteration()
@staticmethod
def make_lexer_(filename, location):
try:
with codecs.open(filename, "rb", "utf-8") as f:
return Lexer(f.read(), filename)
except IOError as err:
raise LexerError(str(err), location)
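
A sketch of the token stream the Lexer yields; comments and newlines are consumed internally, and IncludingLexer additionally splices in the tokens of include()d files:

from fontTools.feaLib.lexer import Lexer

# Iteration yields (token_type, token, location) triples.
for token_type, token, _ in Lexer("sub f i by f_i;  # liga", "test.fea"):
    print(token_type, token)
# NAME sub / NAME f / NAME i / NAME by / NAME f_i / SYMBOL ;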

Lib/fontTools/feaLib/lexer_test.py
@@ -0,0 +1,160 @@
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError
import os
import unittest
def lex(s):
return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")]
class LexerErrorTest(unittest.TestCase):
def test_str(self):
err = LexerError("Squeak!", ("foo.fea", 23, 42))
self.assertEqual(str(err), "foo.fea:23:42: Squeak!")
def test_str_nolocation(self):
err = LexerError("Squeak!", None)
self.assertEqual(str(err), "Squeak!")
class LexerTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
# and fires deprecation warnings if a program uses the old name.
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
def test_empty(self):
self.assertEqual(lex(""), [])
self.assertEqual(lex(" \t "), [])
def test_name(self):
self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")])
self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")])
self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")])
self.assertEqual(lex("_"), [(Lexer.NAME, "_")])
self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")])
def test_cid(self):
self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)])
def test_glyphclass(self):
self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")])
self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)")
self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A")
self.assertRaisesRegex(LexerError, "not be longer than 30 characters",
lex, "@a123456789.a123456789.a123456789.x")
def test_include(self):
self.assertEqual(lex("include (~/foo/bar baz.fea);"), [
(Lexer.NAME, "include"),
(Lexer.FILENAME, "~/foo/bar baz.fea"),
(Lexer.SYMBOL, ";")
])
self.assertEqual(lex("include # Comment\n (foo) \n;"), [
(Lexer.NAME, "include"),
(Lexer.FILENAME, "foo"),
(Lexer.SYMBOL, ";")
])
self.assertRaises(LexerError, lex, "include blah")
self.assertRaises(LexerError, lex, "include (blah")
def test_number(self):
self.assertEqual(lex("123 -456"),
[(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)])
self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)])
self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)])
def test_symbol(self):
self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")])
self.assertEqual(
lex("foo - -2"),
[(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)])
def test_comment(self):
self.assertEqual(lex("# Comment\n#"), [])
def test_string(self):
self.assertEqual(lex('"foo" "bar"'),
[(Lexer.STRING, "foo"), (Lexer.STRING, "bar")])
self.assertRaises(LexerError, lambda: lex('"foo\n bar"'))
def test_bad_character(self):
self.assertRaises(LexerError, lambda: lex("123 \u0001"))
def test_newline(self):
lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")]
self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix
self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh
self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows
self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed
def test_location(self):
locs = lambda s: ["%s:%d:%d" % loc
for (_, _, loc) in Lexer(s, "test.fea")]
self.assertEqual(locs("a b # Comment\n12 @x"), [
"test.fea:1:1", "test.fea:1:3", "test.fea:2:1",
"test.fea:2:4"
])
def test_scan_over_(self):
lexer = Lexer("abbacabba12", "test.fea")
self.assertEqual(lexer.pos_, 0)
lexer.scan_over_("xyz")
self.assertEqual(lexer.pos_, 0)
lexer.scan_over_("abc")
self.assertEqual(lexer.pos_, 9)
lexer.scan_over_("abc")
self.assertEqual(lexer.pos_, 9)
lexer.scan_over_("0123456789")
self.assertEqual(lexer.pos_, 11)
def test_scan_until_(self):
lexer = Lexer("foo'bar", "test.fea")
self.assertEqual(lexer.pos_, 0)
lexer.scan_until_("'")
self.assertEqual(lexer.pos_, 3)
lexer.scan_until_("'")
self.assertEqual(lexer.pos_, 3)
class IncludingLexerTest(unittest.TestCase):
@staticmethod
def getpath(filename):
path, _ = os.path.split(__file__)
return os.path.join(path, "testdata", filename)
def test_include(self):
lexer = IncludingLexer(self.getpath("include4.fea"))
result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1])
for _, token, loc in lexer]
self.assertEqual(result, [
"I4a include4.fea:1",
"I3a include3.fea:1",
"I2a include2.fea:1",
"I1a include1.fea:1",
"I0 include0.fea:1",
"I1b include1.fea:3",
"I2b include2.fea:3",
"I3b include3.fea:3",
"I4b include4.fea:3"
])
def test_include_limit(self):
lexer = IncludingLexer(self.getpath("include6.fea"))
self.assertRaises(LexerError, lambda: list(lexer))
def test_include_self(self):
lexer = IncludingLexer(self.getpath("includeself.fea"))
self.assertRaises(LexerError, lambda: list(lexer))
def test_include_missing_file(self):
lexer = IncludingLexer(self.getpath("includemissingfile.fea"))
self.assertRaises(LexerError, lambda: list(lexer))
if __name__ == "__main__":
unittest.main()

Lib/fontTools/feaLib/parser.py
@@ -0,0 +1,466 @@
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib.lexer import Lexer, IncludingLexer
import fontTools.feaLib.ast as ast
import os
import re
class ParserError(Exception):
def __init__(self, message, location):
Exception.__init__(self, message)
self.location = location
def __str__(self):
message = Exception.__str__(self)
if self.location:
path, line, column = self.location
return "%s:%d:%d: %s" % (path, line, column, message)
else:
return message
class Parser(object):
def __init__(self, path):
self.doc_ = ast.FeatureFile()
self.anchors_ = SymbolTable()
self.glyphclasses_ = SymbolTable()
self.lookups_ = SymbolTable()
self.valuerecords_ = SymbolTable()
self.symbol_tables_ = {
self.anchors_, self.glyphclasses_,
self.lookups_, self.valuerecords_
}
self.next_token_type_, self.next_token_ = (None, None)
self.next_token_location_ = None
self.lexer_ = IncludingLexer(path)
self.advance_lexer_()
def parse(self):
statements = self.doc_.statements
while self.next_token_type_ is not None:
self.advance_lexer_()
if self.cur_token_type_ is Lexer.GLYPHCLASS:
statements.append(self.parse_glyphclass_definition_())
elif self.is_cur_keyword_("anchorDef"):
statements.append(self.parse_anchordef_())
elif self.is_cur_keyword_("languagesystem"):
statements.append(self.parse_languagesystem_())
elif self.is_cur_keyword_("lookup"):
statements.append(self.parse_lookup_(vertical=False))
elif self.is_cur_keyword_("feature"):
statements.append(self.parse_feature_block_())
elif self.is_cur_keyword_("valueRecordDef"):
statements.append(
self.parse_valuerecord_definition_(vertical=False))
else:
raise ParserError("Expected feature, languagesystem, "
"lookup, or glyph class definition",
self.cur_token_location_)
return self.doc_
def parse_anchordef_(self):
assert self.is_cur_keyword_("anchorDef")
location = self.cur_token_location_
x, y = self.expect_number_(), self.expect_number_()
contourpoint = None
if self.next_token_ == "contourpoint":
self.expect_keyword_("contourpoint")
contourpoint = self.expect_number_()
name = self.expect_name_()
self.expect_symbol_(";")
anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint)
self.anchors_.define(name, anchordef)
return anchordef
def parse_glyphclass_definition_(self):
location, name = self.cur_token_location_, self.cur_token_
self.expect_symbol_("=")
glyphs = self.parse_glyphclass_(accept_glyphname=False)
self.expect_symbol_(";")
if self.glyphclasses_.resolve(name) is not None:
raise ParserError("Glyph class @%s already defined" % name,
location)
glyphclass = ast.GlyphClassDefinition(location, name, glyphs)
self.glyphclasses_.define(name, glyphclass)
return glyphclass
def parse_glyphclass_(self, accept_glyphname):
result = set()
if accept_glyphname and self.next_token_type_ is Lexer.NAME:
result.add(self.expect_name_())
return result
if self.next_token_type_ is Lexer.GLYPHCLASS:
self.advance_lexer_()
gc = self.glyphclasses_.resolve(self.cur_token_)
if gc is None:
raise ParserError("Unknown glyph class @%s" % self.cur_token_,
self.cur_token_location_)
result.update(gc.glyphs)
return result
self.expect_symbol_("[")
while self.next_token_ != "]":
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME:
if self.next_token_ == "-":
range_location_ = self.cur_token_location_
range_start = self.cur_token_
self.expect_symbol_("-")
range_end = self.expect_name_()
result.update(self.make_glyph_range_(range_location_,
range_start,
range_end))
else:
result.add(self.cur_token_)
elif self.cur_token_type_ is Lexer.GLYPHCLASS:
gc = self.glyphclasses_.resolve(self.cur_token_)
if gc is None:
raise ParserError(
"Unknown glyph class @%s" % self.cur_token_,
self.cur_token_location_)
result.update(gc.glyphs)
else:
raise ParserError(
"Expected glyph name, glyph range, "
"or glyph class reference",
self.cur_token_location_)
self.expect_symbol_("]")
return result
def parse_glyph_pattern_(self):
prefix, glyphs, lookups, suffix = ([], [], [], [])
while self.next_token_ not in {"by", "from", ";"}:
gc = self.parse_glyphclass_(accept_glyphname=True)
marked = False
if self.next_token_ == "'":
self.expect_symbol_("'")
marked = True
if marked:
glyphs.append(gc)
elif glyphs:
suffix.append(gc)
else:
prefix.append(gc)
lookup = None
if self.next_token_ == "lookup":
self.expect_keyword_("lookup")
if not marked:
raise ParserError("Lookups can only follow marked glyphs",
self.cur_token_location_)
lookup_name = self.expect_name_()
lookup = self.lookups_.resolve(lookup_name)
if lookup is None:
raise ParserError('Unknown lookup "%s"' % lookup_name,
self.cur_token_location_)
if marked:
lookups.append(lookup)
        if not glyphs and not suffix:  # e.g., "sub f f i by"
assert lookups == []
return ([], prefix, [None] * len(prefix), [])
else:
return (prefix, glyphs, lookups, suffix)
def parse_ignore_(self):
assert self.is_cur_keyword_("ignore")
location = self.cur_token_location_
self.advance_lexer_()
if self.cur_token_ in ["substitute", "sub"]:
prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_()
self.expect_symbol_(";")
return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix)
raise ParserError("Expected \"substitute\"", self.next_token_location_)
def parse_language_(self):
assert self.is_cur_keyword_("language")
location, language = self.cur_token_location_, self.expect_tag_()
include_default, required = (True, False)
if self.next_token_ in {"exclude_dflt", "include_dflt"}:
include_default = (self.expect_name_() == "include_dflt")
if self.next_token_ == "required":
self.expect_keyword_("required")
required = True
self.expect_symbol_(";")
return ast.LanguageStatement(location, language.strip(),
include_default, required)
def parse_lookup_(self, vertical):
assert self.is_cur_keyword_("lookup")
location, name = self.cur_token_location_, self.expect_name_()
if self.next_token_ == ";":
lookup = self.lookups_.resolve(name)
if lookup is None:
raise ParserError("Unknown lookup \"%s\"" % name,
self.cur_token_location_)
self.expect_symbol_(";")
return ast.LookupReferenceStatement(location, lookup)
use_extension = False
if self.next_token_ == "useExtension":
self.expect_keyword_("useExtension")
use_extension = True
block = ast.LookupBlock(location, name, use_extension)
self.parse_block_(block, vertical)
self.lookups_.define(name, block)
return block
def parse_script_(self):
assert self.is_cur_keyword_("script")
location, script = self.cur_token_location_, self.expect_tag_()
self.expect_symbol_(";")
return ast.ScriptStatement(location, script)
def parse_substitute_(self):
assert self.cur_token_ in {"substitute", "sub"}
location = self.cur_token_location_
old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_()
new = []
if self.next_token_ == "by":
keyword = self.expect_keyword_("by")
while self.next_token_ != ";":
new.append(self.parse_glyphclass_(accept_glyphname=True))
elif self.next_token_ == "from":
keyword = self.expect_keyword_("from")
new = [self.parse_glyphclass_(accept_glyphname=False)]
else:
keyword = None
self.expect_symbol_(";")
        if not new and not any(lookups):
raise ParserError(
'Expected "by", "from" or explicit lookup references',
self.cur_token_location_)
if keyword == "from":
if len(old) != 1 or len(old[0]) != 1:
raise ParserError('Expected a single glyph before "from"',
location)
if len(new) != 1:
raise ParserError('Expected a single glyphclass after "from"',
location)
return ast.AlternateSubstitution(location, list(old[0])[0], new[0])
rule = ast.SubstitutionRule(location, old, new)
rule.old_prefix, rule.old_suffix = old_prefix, old_suffix
rule.lookups = lookups
return rule
def parse_subtable_(self):
assert self.is_cur_keyword_("subtable")
location = self.cur_token_location_
self.expect_symbol_(";")
return ast.SubtableStatement(location)
def parse_valuerecord_(self, vertical):
if self.next_token_type_ is Lexer.NUMBER:
number, location = self.expect_number_(), self.cur_token_location_
if vertical:
val = ast.ValueRecord(location, 0, 0, 0, number)
else:
val = ast.ValueRecord(location, 0, 0, number, 0)
return val
self.expect_symbol_("<")
location = self.cur_token_location_
if self.next_token_type_ is Lexer.NAME:
name = self.expect_name_()
vrd = self.valuerecords_.resolve(name)
if vrd is None:
raise ParserError("Unknown valueRecordDef \"%s\"" % name,
self.cur_token_location_)
value = vrd.value
xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
else:
xPlacement, yPlacement, xAdvance, yAdvance = (
self.expect_number_(), self.expect_number_(),
self.expect_number_(), self.expect_number_())
self.expect_symbol_(">")
return ast.ValueRecord(
location, xPlacement, yPlacement, xAdvance, yAdvance)
def parse_valuerecord_definition_(self, vertical):
assert self.is_cur_keyword_("valueRecordDef")
location = self.cur_token_location_
value = self.parse_valuerecord_(vertical)
name = self.expect_name_()
self.expect_symbol_(";")
vrd = ast.ValueRecordDefinition(location, name, value)
self.valuerecords_.define(name, vrd)
return vrd
def parse_languagesystem_(self):
assert self.cur_token_ == "languagesystem"
location = self.cur_token_location_
script, language = self.expect_tag_(), self.expect_tag_()
self.expect_symbol_(";")
return ast.LanguageSystemStatement(location, script, language)
def parse_feature_block_(self):
assert self.cur_token_ == "feature"
location = self.cur_token_location_
tag = self.expect_tag_()
vertical = (tag == "vkrn")
use_extension = False
if self.next_token_ == "useExtension":
self.expect_keyword_("useExtension")
use_extension = True
block = ast.FeatureBlock(location, tag, use_extension)
self.parse_block_(block, vertical)
return block
def parse_block_(self, block, vertical):
self.expect_symbol_("{")
for symtab in self.symbol_tables_:
symtab.enter_scope()
statements = block.statements
while self.next_token_ != "}":
self.advance_lexer_()
if self.cur_token_type_ is Lexer.GLYPHCLASS:
statements.append(self.parse_glyphclass_definition_())
elif self.is_cur_keyword_("anchorDef"):
statements.append(self.parse_anchordef_())
elif self.is_cur_keyword_("ignore"):
statements.append(self.parse_ignore_())
elif self.is_cur_keyword_("language"):
statements.append(self.parse_language_())
elif self.is_cur_keyword_("lookup"):
statements.append(self.parse_lookup_(vertical))
elif self.is_cur_keyword_("script"):
statements.append(self.parse_script_())
elif (self.is_cur_keyword_("substitute") or
self.is_cur_keyword_("sub")):
statements.append(self.parse_substitute_())
elif self.is_cur_keyword_("subtable"):
statements.append(self.parse_subtable_())
elif self.is_cur_keyword_("valueRecordDef"):
statements.append(self.parse_valuerecord_definition_(vertical))
else:
raise ParserError(
"Expected glyph class definition or statement",
self.cur_token_location_)
self.expect_symbol_("}")
for symtab in self.symbol_tables_:
symtab.exit_scope()
name = self.expect_name_()
if name != block.name.strip():
raise ParserError("Expected \"%s\"" % block.name.strip(),
self.cur_token_location_)
self.expect_symbol_(";")
def is_cur_keyword_(self, k):
return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k)
def expect_tag_(self):
self.advance_lexer_()
if self.cur_token_type_ is not Lexer.NAME:
raise ParserError("Expected a tag", self.cur_token_location_)
if len(self.cur_token_) > 4:
raise ParserError("Tags can not be longer than 4 characters",
self.cur_token_location_)
return (self.cur_token_ + " ")[:4]
def expect_symbol_(self, symbol):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol:
return symbol
raise ParserError("Expected '%s'" % symbol, self.cur_token_location_)
def expect_keyword_(self, keyword):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
return self.cur_token_
raise ParserError("Expected \"%s\"" % keyword,
self.cur_token_location_)
def expect_name_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NAME:
return self.cur_token_
raise ParserError("Expected a name", self.cur_token_location_)
def expect_number_(self):
self.advance_lexer_()
if self.cur_token_type_ is Lexer.NUMBER:
return self.cur_token_
raise ParserError("Expected a number", self.cur_token_location_)
def advance_lexer_(self):
self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
self.next_token_type_, self.next_token_, self.next_token_location_)
try:
(self.next_token_type_, self.next_token_,
self.next_token_location_) = self.lexer_.next()
except StopIteration:
self.next_token_type_, self.next_token_ = (None, None)
def make_glyph_range_(self, location, start, limit):
"""("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}"""
result = set()
if len(start) != len(limit):
raise ParserError(
"Bad range: \"%s\" and \"%s\" should have the same length" %
(start, limit), location)
rev = lambda s: ''.join(reversed(list(s))) # string reversal
prefix = os.path.commonprefix([start, limit])
suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
if len(suffix) > 0:
start_range = start[len(prefix):-len(suffix)]
limit_range = limit[len(prefix):-len(suffix)]
else:
start_range = start[len(prefix):]
limit_range = limit[len(prefix):]
if start_range >= limit_range:
raise ParserError("Start of range must be smaller than its end",
location)
uppercase = re.compile(r'^[A-Z]$')
if uppercase.match(start_range) and uppercase.match(limit_range):
for c in range(ord(start_range), ord(limit_range) + 1):
result.add("%s%c%s" % (prefix, c, suffix))
return result
lowercase = re.compile(r'^[a-z]$')
if lowercase.match(start_range) and lowercase.match(limit_range):
for c in range(ord(start_range), ord(limit_range) + 1):
result.add("%s%c%s" % (prefix, c, suffix))
return result
digits = re.compile(r'^[0-9]{1,3}$')
if digits.match(start_range) and digits.match(limit_range):
for i in range(int(start_range, 10), int(limit_range, 10) + 1):
number = ("000" + str(i))[-len(start_range):]
result.add("%s%s%s" % (prefix, number, suffix))
return result
raise ParserError("Bad range: \"%s-%s\"" % (start, limit), location)
class SymbolTable(object):
def __init__(self):
self.scopes_ = [{}]
def enter_scope(self):
self.scopes_.append({})
def exit_scope(self):
self.scopes_.pop()
def define(self, name, item):
self.scopes_[-1][name] = item
def resolve(self, name):
for scope in reversed(self.scopes_):
item = scope.get(name)
if item:
return item
return None
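
The SymbolTable above backs anchors, glyph classes, lookups, and value records alike. A sketch of its scoping contract (mirroring SymbolTableTest below):

from fontTools.feaLib.parser import SymbolTable

symtab = SymbolTable()
symtab.define("foo", 23)   # outermost scope
symtab.enter_scope()       # e.g. when entering a feature block
symtab.define("foo", 42)   # shadows the outer binding
assert symtab.resolve("foo") == 42
symtab.exit_scope()        # leaving the block restores the old binding
assert symtab.resolve("foo") == 23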

Lib/fontTools/feaLib/parser_test.py
@@ -0,0 +1,448 @@
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib.lexer import LexerError
from fontTools.feaLib.parser import Parser, ParserError, SymbolTable
from fontTools.misc.py23 import *
import fontTools.feaLib.ast as ast
import codecs
import os
import shutil
import sys
import tempfile
import unittest
class ParserTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
# and fires deprecation warnings if a program uses the old name.
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
def test_anchordef(self):
[foo] = self.parse("anchorDef 123 456 foo;").statements
self.assertEqual(type(foo), ast.AnchorDefinition)
self.assertEqual(foo.name, "foo")
self.assertEqual(foo.x, 123)
self.assertEqual(foo.y, 456)
self.assertEqual(foo.contourpoint, None)
def test_anchordef_contourpoint(self):
[foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements
self.assertEqual(type(foo), ast.AnchorDefinition)
self.assertEqual(foo.name, "foo")
self.assertEqual(foo.x, 123)
self.assertEqual(foo.y, 456)
self.assertEqual(foo.contourpoint, 5)
def test_feature_block(self):
[liga] = self.parse("feature liga {} liga;").statements
self.assertEqual(liga.name, "liga")
self.assertFalse(liga.use_extension)
def test_feature_block_useExtension(self):
[liga] = self.parse("feature liga useExtension {} liga;").statements
self.assertEqual(liga.name, "liga")
self.assertTrue(liga.use_extension)
def test_glyphclass(self):
[gc] = self.parse("@dash = [endash emdash figuredash];").statements
self.assertEqual(gc.name, "dash")
self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"})
def test_glyphclass_bad(self):
self.assertRaisesRegex(
ParserError,
"Expected glyph name, glyph range, or glyph class reference",
self.parse, "@bad = [a 123];")
def test_glyphclass_duplicate(self):
self.assertRaisesRegex(
ParserError, "Glyph class @dup already defined",
self.parse, "@dup = [a b]; @dup = [x];")
def test_glyphclass_empty(self):
[gc] = self.parse("@empty_set = [];").statements
self.assertEqual(gc.name, "empty_set")
self.assertEqual(gc.glyphs, set())
def test_glyphclass_equality(self):
[foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements
self.assertEqual(foo.glyphs, {"a", "b"})
self.assertEqual(bar.glyphs, {"a", "b"})
def test_glyphclass_range_uppercase(self):
[gc] = self.parse("@swashes = [X.swash-Z.swash];").statements
self.assertEqual(gc.name, "swashes")
self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"})
def test_glyphclass_range_lowercase(self):
[gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements
self.assertEqual(gc.name, "defg.sc")
self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"})
def test_glyphclass_range_digit1(self):
[gc] = self.parse("@range = [foo.2-foo.5];").statements
self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"})
def test_glyphclass_range_digit2(self):
[gc] = self.parse("@range = [foo.09-foo.11];").statements
self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"})
def test_glyphclass_range_digit3(self):
[gc] = self.parse("@range = [foo.123-foo.125];").statements
self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"})
def test_glyphclass_range_bad(self):
self.assertRaisesRegex(
ParserError,
"Bad range: \"a\" and \"foobar\" should have the same length",
self.parse, "@bad = [a-foobar];")
self.assertRaisesRegex(
ParserError, "Bad range: \"A.swash-z.swash\"",
self.parse, "@bad = [A.swash-z.swash];")
self.assertRaisesRegex(
ParserError, "Start of range must be smaller than its end",
self.parse, "@bad = [B.swash-A.swash];")
self.assertRaisesRegex(
ParserError, "Bad range: \"foo.1234-foo.9876\"",
self.parse, "@bad = [foo.1234-foo.9876];")
def test_glyphclass_range_mixed(self):
[gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements
self.assertEqual(gc.glyphs, {
"a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc"
})
def test_glyphclass_reference(self):
[vowels_lc, vowels_uc, vowels] = self.parse(
"@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];"
"@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements
self.assertEqual(vowels_lc.glyphs, set(list("aeiou")))
self.assertEqual(vowels_uc.glyphs, set(list("AEIOU")))
self.assertEqual(vowels.glyphs, set(list("aeiouyAEIOUY")))
self.assertRaisesRegex(
ParserError, "Unknown glyph class @unknown",
self.parse, "@bad = [@unknown];")
def test_glyphclass_scoping(self):
[foo, liga, smcp] = self.parse(
"@foo = [a b];"
"feature liga { @bar = [@foo l]; } liga;"
"feature smcp { @bar = [@foo s]; } smcp;"
).statements
self.assertEqual(foo.glyphs, {"a", "b"})
self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"})
self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"})
def test_ignore_sub(self):
doc = self.parse("feature test {ignore sub e t' c;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.IgnoreSubstitutionRule)
self.assertEqual(s.prefix, [{"e"}])
self.assertEqual(s.glyphs, [{"t"}])
self.assertEqual(s.suffix, [{"c"}])
def test_ignore_substitute(self):
doc = self.parse(
"feature test {"
" ignore substitute f [a e] d' [a u]' [e y];"
"} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.IgnoreSubstitutionRule)
self.assertEqual(s.prefix, [{"f"}, {"a", "e"}])
self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}])
self.assertEqual(s.suffix, [{"e", "y"}])
def test_language(self):
doc = self.parse("feature test {language DEU;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU")
self.assertTrue(s.include_default)
self.assertFalse(s.required)
def test_language_exclude_dflt(self):
doc = self.parse("feature test {language DEU exclude_dflt;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU")
self.assertFalse(s.include_default)
self.assertFalse(s.required)
def test_language_exclude_dflt_required(self):
doc = self.parse("feature test {"
" language DEU exclude_dflt required;"
"} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU")
self.assertFalse(s.include_default)
self.assertTrue(s.required)
def test_language_include_dflt(self):
doc = self.parse("feature test {language DEU include_dflt;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU")
self.assertTrue(s.include_default)
self.assertFalse(s.required)
def test_language_include_dflt_required(self):
doc = self.parse("feature test {"
" language DEU include_dflt required;"
"} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU")
self.assertTrue(s.include_default)
self.assertTrue(s.required)
def test_lookup_block(self):
[lookup] = self.parse("lookup Ligatures {} Ligatures;").statements
self.assertEqual(lookup.name, "Ligatures")
self.assertFalse(lookup.use_extension)
def test_lookup_block_useExtension(self):
[lookup] = self.parse("lookup Foo useExtension {} Foo;").statements
self.assertEqual(lookup.name, "Foo")
self.assertTrue(lookup.use_extension)
def test_lookup_block_name_mismatch(self):
self.assertRaisesRegex(
ParserError, 'Expected "Foo"',
self.parse, "lookup Foo {} Bar;")
def test_lookup_block_with_horizontal_valueRecordDef(self):
doc = self.parse("feature liga {"
" lookup look {"
" valueRecordDef 123 foo;"
" } look;"
"} liga;")
[liga] = doc.statements
[look] = liga.statements
[foo] = look.statements
self.assertEqual(foo.value.xAdvance, 123)
self.assertEqual(foo.value.yAdvance, 0)
def test_lookup_block_with_vertical_valueRecordDef(self):
doc = self.parse("feature vkrn {"
" lookup look {"
" valueRecordDef 123 foo;"
" } look;"
"} vkrn;")
[vkrn] = doc.statements
[look] = vkrn.statements
[foo] = look.statements
self.assertEqual(foo.value.xAdvance, 0)
self.assertEqual(foo.value.yAdvance, 123)
def test_lookup_reference(self):
[foo, bar] = self.parse("lookup Foo {} Foo;"
"feature Bar {lookup Foo;} Bar;").statements
[ref] = bar.statements
self.assertEqual(type(ref), ast.LookupReferenceStatement)
self.assertEqual(ref.lookup, foo)
def test_lookup_reference_unknown(self):
self.assertRaisesRegex(
ParserError, 'Unknown lookup "Huh"',
self.parse, "feature liga {lookup Huh;} liga;")
def test_script(self):
doc = self.parse("feature test {script cyrl;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.ScriptStatement)
self.assertEqual(s.script, "cyrl")
def test_substitute_single_format_a(self): # GSUB LookupType 1
doc = self.parse("feature smcp {substitute a by a.sc;} smcp;")
sub = doc.statements[0].statements[0]
self.assertEqual(sub.old_prefix, [])
self.assertEqual(sub.old, [{"a"}])
self.assertEqual(sub.old_suffix, [])
self.assertEqual(sub.new, [{"a.sc"}])
self.assertEqual(sub.lookups, [None])
def test_substitute_single_format_b(self): # GSUB LookupType 1
doc = self.parse(
"feature smcp {"
" substitute [one.fitted one.oldstyle] by one;"
"} smcp;")
sub = doc.statements[0].statements[0]
self.assertEqual(sub.old_prefix, [])
self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}])
self.assertEqual(sub.old_suffix, [])
self.assertEqual(sub.new, [{"one"}])
self.assertEqual(sub.lookups, [None])
def test_substitute_single_format_c(self): # GSUB LookupType 1
doc = self.parse(
"feature smcp {"
" substitute [a-d] by [A.sc-D.sc];"
"} smcp;")
sub = doc.statements[0].statements[0]
self.assertEqual(sub.old_prefix, [])
self.assertEqual(sub.old, [{"a", "b", "c", "d"}])
self.assertEqual(sub.old_suffix, [])
self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}])
self.assertEqual(sub.lookups, [None])
def test_substitute_multiple(self): # GSUB LookupType 2
doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;")
sub = doc.statements[0].statements[0]
self.assertEqual(type(sub), ast.SubstitutionRule)
self.assertEqual(sub.old_prefix, [])
self.assertEqual(sub.old, [{"f_f_i"}])
self.assertEqual(sub.old_suffix, [])
self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}])
self.assertEqual(sub.lookups, [None])
def test_substitute_from(self): # GSUB LookupType 3
doc = self.parse("feature test {"
" substitute a from [a.1 a.2 a.3];"
"} test;")
sub = doc.statements[0].statements[0]
self.assertEqual(type(sub), ast.AlternateSubstitution)
self.assertEqual(sub.glyph, "a")
self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"})
def test_substitute_from_glyphclass(self): # GSUB LookupType 3
doc = self.parse("feature test {"
" @Ampersands = [ampersand.1 ampersand.2];"
" substitute ampersand from @Ampersands;"
"} test;")
[glyphclass, sub] = doc.statements[0].statements
self.assertEqual(type(sub), ast.AlternateSubstitution)
self.assertEqual(sub.glyph, "ampersand")
self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"})
def test_substitute_ligature(self): # GSUB LookupType 4
doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;")
sub = doc.statements[0].statements[0]
self.assertEqual(sub.old_prefix, [])
self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}])
self.assertEqual(sub.old_suffix, [])
self.assertEqual(sub.new, [{"f_f_i"}])
self.assertEqual(sub.lookups, [None, None, None])
def test_substitute_lookups(self):
doc = Parser(self.getpath("spec5fi.fea")).parse()
[ligs, sub, feature] = doc.statements
self.assertEqual(feature.statements[0].lookups, [ligs, None, sub])
self.assertEqual(feature.statements[1].lookups, [ligs, None, sub])
def test_substitute_missing_by(self):
self.assertRaisesRegex(
ParserError, 'Expected "by", "from" or explicit lookup references',
self.parse, "feature liga {substitute f f i;} liga;")
def test_subtable(self):
doc = self.parse("feature test {subtable;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.SubtableStatement)
def test_valuerecord_format_a_horizontal(self):
doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;")
value = doc.statements[0].statements[0].value
self.assertEqual(value.xPlacement, 0)
self.assertEqual(value.yPlacement, 0)
self.assertEqual(value.xAdvance, 123)
self.assertEqual(value.yAdvance, 0)
def test_valuerecord_format_a_vertical(self):
doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;")
value = doc.statements[0].statements[0].value
self.assertEqual(value.xPlacement, 0)
self.assertEqual(value.yPlacement, 0)
self.assertEqual(value.xAdvance, 0)
self.assertEqual(value.yAdvance, 123)
def test_valuerecord_format_b(self):
doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;")
value = doc.statements[0].statements[0].value
self.assertEqual(value.xPlacement, 1)
self.assertEqual(value.yPlacement, 2)
self.assertEqual(value.xAdvance, 3)
self.assertEqual(value.yAdvance, 4)
def test_valuerecord_named(self):
doc = self.parse("valueRecordDef <1 2 3 4> foo;"
"feature liga {valueRecordDef <foo> bar;} liga;")
value = doc.statements[1].statements[0].value
self.assertEqual(value.xPlacement, 1)
self.assertEqual(value.yPlacement, 2)
self.assertEqual(value.xAdvance, 3)
self.assertEqual(value.yAdvance, 4)
def test_valuerecord_named_unknown(self):
self.assertRaisesRegex(
ParserError, "Unknown valueRecordDef \"unknown\"",
self.parse, "valueRecordDef <unknown> foo;")
def test_valuerecord_scoping(self):
[foo, liga, smcp] = self.parse(
"valueRecordDef 789 foo;"
"feature liga {valueRecordDef <foo> bar;} liga;"
"feature smcp {valueRecordDef <foo> bar;} smcp;"
).statements
self.assertEqual(foo.value.xAdvance, 789)
self.assertEqual(liga.statements[0].value.xAdvance, 789)
self.assertEqual(smcp.statements[0].value.xAdvance, 789)
def test_languagesystem(self):
[langsys] = self.parse("languagesystem latn DEU;").statements
self.assertEqual(langsys.script, "latn")
self.assertEqual(langsys.language, "DEU ")
self.assertRaisesRegex(
ParserError, "Expected ';'",
self.parse, "languagesystem latn DEU")
self.assertRaisesRegex(
ParserError, "longer than 4 characters",
self.parse, "languagesystem foobar DEU")
self.assertRaisesRegex(
ParserError, "longer than 4 characters",
self.parse, "languagesystem latn FOOBAR")
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def parse(self, text):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
self.num_tempfiles += 1
path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles)
with codecs.open(path, "wb", "utf-8") as outfile:
outfile.write(text)
return Parser(path).parse()
@staticmethod
def getpath(testfile):
path, _ = os.path.split(__file__)
return os.path.join(path, "testdata", testfile)
class SymbolTableTest(unittest.TestCase):
def test_scopes(self):
symtab = SymbolTable()
symtab.define("foo", 23)
self.assertEqual(symtab.resolve("foo"), 23)
symtab.enter_scope()
self.assertEqual(symtab.resolve("foo"), 23)
symtab.define("foo", 42)
self.assertEqual(symtab.resolve("foo"), 42)
symtab.exit_scope()
self.assertEqual(symtab.resolve("foo"), 23)
def test_resolve_undefined(self):
self.assertEqual(SymbolTable().resolve("abc"), None)
if __name__ == "__main__":
unittest.main()

Lib/fontTools/feaLib/testdata/include0.fea
@@ -0,0 +1 @@
I0

Lib/fontTools/feaLib/testdata/include1.fea
@@ -0,0 +1,3 @@
I1a
include(include0.fea);
I1b

Lib/fontTools/feaLib/testdata/include2.fea
@@ -0,0 +1,3 @@
I2a
include(include1.fea);
I2b

Lib/fontTools/feaLib/testdata/include3.fea
@@ -0,0 +1,4 @@
I3a
include(include2.fea);
I3b

Lib/fontTools/feaLib/testdata/include4.fea
@@ -0,0 +1,4 @@
I4a
include(include3.fea);
I4b

Lib/fontTools/feaLib/testdata/include5.fea
@@ -0,0 +1,3 @@
I5a
include(include4.fea);
I5b

Lib/fontTools/feaLib/testdata/include6.fea
@@ -0,0 +1,3 @@
I6a
include(include5.fea);
I6b

Lib/fontTools/feaLib/testdata/includemissingfile.fea
@@ -0,0 +1 @@
include(missingfile.fea);

Lib/fontTools/feaLib/testdata/includeself.fea
@@ -0,0 +1 @@
include(includeself.fea);

Lib/fontTools/feaLib/testdata/mini.fea
@@ -0,0 +1,19 @@
# Example file from OpenType Feature File specification, section 1.
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html
# Script and language coverage
languagesystem DFLT dflt;
languagesystem latn dflt;
# Ligature formation
feature liga {
substitute f i by f_i;
substitute f l by f_l;
} liga;
# Kerning
feature kern {
position A Y -100;
position a y -80;
position s f' <0 0 10 0> t;
} kern;

Lib/fontTools/feaLib/testdata/spec5fi.fea
@@ -0,0 +1,18 @@
# OpenType Feature File specification, section 5.f.i, example 1
# "Specifying a Chain Sub rule and marking sub-runs"
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html
lookup CNTXT_LIGS {
substitute f i by f_i;
substitute c t by c_t;
} CNTXT_LIGS;
lookup CNTXT_SUB {
substitute n by n.end;
substitute s by s.end;
} CNTXT_SUB;
feature test {
substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB;
substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB;
} test;