[feaLib.parser] Add option to not follow includes

Currently, the feature file parser always resolves included files,
parses their contents, and inserts them into the resulting AST. The
original `include` statements are lost.

This commit introduces an option to not follow inclusions. Instead, the
output AST will contain an `include` statement for each inclusion. This
makes it possible to process a feature file on its own and to round-trip it.

For example, in glyphsLib, when going from a UFO to a .glyphs file, the
UFO feature file will be sliced up into Glyphs.app classes (e.g. a
GSFeaturePrefix with code `include(../family.fea);`), and when going back
from .glyphs to UFO, the feature file will be patched back together.
Jany Belluz 2018-01-22 18:27:36 +00:00
parent d6a5a489c0
commit 89979dea04
4 changed files with 71 additions and 6 deletions
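
To illustrate the new option, here is a minimal usage sketch; the feature text, the StringIO object and the assertions are illustrative and not part of the commit:

from io import StringIO
from fontTools.feaLib.parser import Parser
from fontTools.feaLib import ast

# A made-up feature snippet that includes another file.
fea = StringIO("include(../family.fea);\n")

# With followIncludes=False the include is kept as an AST node instead of
# being resolved and spliced into the document.
doc = Parser(fea, followIncludes=False).parse()

assert isinstance(doc.statements[0], ast.IncludeStatement)
assert doc.statements[0].asFea() == "include(../family.fea);"  # round-trips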

View File

@@ -591,6 +591,22 @@ class IgnoreSubstStatement(Statement):
         return "ignore sub " + ", ".join(contexts) + ";"
 
 
+class IncludeStatement(Statement):
+    def __init__(self, location, filename):
+        super(IncludeStatement, self).__init__(location)
+        self.filename = filename
+
+    def build(self):
+        # TODO: consider lazy-loading the including parser/lexer?
+        raise FeatureLibError(
+            "Building an include statement is not implemented yet. "
+            "Instead, use Parser(..., followIncludes=True) for building.",
+            self.location)
+
+    def asFea(self, indent=""):
+        return indent + "include(%s);" % self.filename
+
+
 class LanguageStatement(Statement):
     def __init__(self, location, language, include_default, required):
         Statement.__init__(self, location)
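
The new AST node can also be constructed and serialized on its own; a small hypothetical example (the location tuple below is made up):

from fontTools.feaLib import ast

inc = ast.IncludeStatement(("<features>", 1, 1), "../family.fea")
print(inc.asFea())  # prints: include(../family.fea);
# inc.build() is intentionally unimplemented and raises FeatureLibError.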

View File

@@ -240,3 +240,9 @@ class IncludingLexer(object):
 
     def scan_anonymous_block(self, tag):
         return self.lexers_[-1].scan_anonymous_block(tag)
+
+
+class NonIncludingLexer(IncludingLexer):
+    """Lexer that does not follow `include` statements, emits them as-is."""
+    def __next__(self):  # Python 3
+        return next(self.lexers_[0])
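
A rough sketch of what the new lexer changes, assuming this commit's lexer module (the token names in the comment are approximate):

from io import StringIO
from fontTools.feaLib.lexer import NonIncludingLexer

fea = StringIO("include(../family.fea);\n")

# NonIncludingLexer only reads tokens from the top-level file, so the include
# statement comes through as its own tokens (roughly NAME "include",
# FILENAME "../family.fea", SYMBOL ";") instead of being replaced by the
# tokens of ../family.fea, as IncludingLexer would do.
for token_type, token, location in NonIncludingLexer(fea):
    print(token_type, repr(token))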

View File

@@ -1,7 +1,7 @@
 from __future__ import print_function, division, absolute_import
 from __future__ import unicode_literals
 from fontTools.feaLib.error import FeatureLibError
-from fontTools.feaLib.lexer import Lexer, IncludingLexer
+from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
 from fontTools.misc.encodingTools import getEncoding
 from fontTools.misc.py23 import *
 import fontTools.feaLib.ast as ast
@@ -17,7 +17,8 @@ class Parser(object):
     extensions = {}
     ast = ast
 
-    def __init__(self, featurefile, glyphNames=(), **kwargs):
+    def __init__(self, featurefile, glyphNames=(), followIncludes=True,
+                 **kwargs):
         if "glyphMap" in kwargs:
             from fontTools.misc.loggingTools import deprecateArgument
             deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
@@ -42,7 +43,8 @@ class Parser(object):
         self.next_token_type_, self.next_token_ = (None, None)
         self.cur_comments_ = []
         self.next_token_location_ = None
-        self.lexer_ = IncludingLexer(featurefile)
+        lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
+        self.lexer_ = lexerClass(featurefile)
         self.advance_lexer_(comments=True)
 
     def parse(self):
@@ -51,6 +53,8 @@ class Parser(object):
             self.advance_lexer_(comments=True)
             if self.cur_token_type_ is Lexer.COMMENT:
                 statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
+            elif self.is_cur_keyword_("include"):
+                statements.append(self.parse_include_())
             elif self.cur_token_type_ is Lexer.GLYPHCLASS:
                 statements.append(self.parse_glyphclass_definition_())
             elif self.is_cur_keyword_(("anon", "anonymous")):
@@ -420,6 +424,13 @@ class Parser(object):
             "Expected \"substitute\" or \"position\"",
             self.cur_token_location_)
 
+    def parse_include_(self):
+        assert self.cur_token_ == "include"
+        location = self.cur_token_location_
+        filename = self.expect_filename_()
+        # self.expect_symbol_(";")
+        return ast.IncludeStatement(location, filename)
+
     def parse_language_(self):
         assert self.is_cur_keyword_("language")
         location = self.cur_token_location_
@@ -1318,6 +1329,13 @@ class Parser(object):
             return self.cur_token_
         raise FeatureLibError("Expected a CID", self.cur_token_location_)
 
+    def expect_filename_(self):
+        self.advance_lexer_()
+        if self.cur_token_type_ is not Lexer.FILENAME:
+            raise FeatureLibError("Expected file name",
+                                  self.cur_token_location_)
+        return self.cur_token_
+
     def expect_glyph_(self):
         self.advance_lexer_()
         if self.cur_token_type_ is Lexer.NAME:
@@ -1424,7 +1442,6 @@ class Parser(object):
         else:
             self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
                 self.next_token_type_, self.next_token_, self.next_token_location_)
-            self.cur_comments_ = []
         while True:
             try:
                 (self.next_token_type_, self.next_token_,

View File

@@ -495,6 +495,32 @@ class ParserTest(unittest.TestCase):
             "lookup L { sub [A A.sc] by a; } L;"
             "feature test { ignore sub f' i', A' lookup L; } test;")
 
+    def test_include_statement(self):
+        doc = self.parse("""\
+            include(../family.fea);
+            include # Comment
+            (foo)
+            ;
+        """, followIncludes=False)
+        s1, s2, s3 = doc.statements
+        self.assertEqual(type(s1), ast.IncludeStatement)
+        self.assertEqual(s1.filename, "../family.fea")
+        self.assertEqual(s1.asFea(), "include(../family.fea);")
+        self.assertEqual(type(s2), ast.IncludeStatement)
+        self.assertEqual(s2.filename, "foo")
+        self.assertEqual(s2.asFea(), "include(foo);")
+        self.assertEqual(type(s3), ast.Comment)
+        self.assertEqual(s3.text, "# Comment")
+
+    def test_include_statement_no_semicolon(self):
+        doc = self.parse("""\
+            include(../family.fea)
+        """, followIncludes=False)
+        s1 = doc.statements[0]
+        self.assertEqual(type(s1), ast.IncludeStatement)
+        self.assertEqual(s1.filename, "../family.fea")
+        self.assertEqual(s1.asFea(), "include(../family.fea);")
+
     def test_language(self):
         doc = self.parse("feature test {language DEU;} test;")
         s = doc.statements[0].statements[0]
@@ -1557,9 +1583,9 @@ class ParserTest(unittest.TestCase):
             doc = self.parse("table %s { ;;; } %s;" % (table, table))
             self.assertEqual(doc.statements[0].statements, [])
 
-    def parse(self, text, glyphNames=GLYPHNAMES):
+    def parse(self, text, glyphNames=GLYPHNAMES, followIncludes=True):
         featurefile = UnicodeIO(text)
-        p = Parser(featurefile, glyphNames)
+        p = Parser(featurefile, glyphNames, followIncludes=followIncludes)
        return p.parse()
 
     @staticmethod