[feaLib] Simplify parser API by removing the ignore_comments option

https://github.com/fonttools/fonttools/pull/879#discussion_r104947968
https://github.com/fonttools/fonttools/issues/829
This commit is contained in:
Sascha Brawer 2017-03-09 14:38:51 +01:00
parent ac2762f0f3
commit bc0670f53f
3 changed files with 25 additions and 34 deletions

View File

@@ -14,10 +14,8 @@ log = logging.getLogger(__name__)
class Parser(object):
extensions = {}
ast = ast
ignore_comments = True
def __init__(self, featurefile, glyphMap):
self.glyphMap_ = glyphMap
@@ -791,7 +789,7 @@ class Parser(object):
def parse_table_GDEF_(self, table):
statements = table.statements
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -813,7 +811,7 @@ class Parser(object):
def parse_table_head_(self, table):
statements = table.statements
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -828,7 +826,7 @@ class Parser(object):
def parse_table_hhea_(self, table):
statements = table.statements
fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -849,7 +847,7 @@ class Parser(object):
def parse_table_vhea_(self, table):
statements = table.statements
fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -869,7 +867,7 @@ class Parser(object):
def parse_table_name_(self, table):
statements = table.statements
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -954,7 +952,7 @@ class Parser(object):
def parse_table_BASE_(self, table):
statements = table.statements
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -981,7 +979,7 @@ class Parser(object):
"winAscent", "winDescent", "XHeight", "CapHeight",
"WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize")
ranges = ("UnicodeRange", "CodePageRange")
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -1192,7 +1190,7 @@ class Parser(object):
symtab.enter_scope()
statements = block.statements
while self.next_token_ != "}" or (not self.ignore_comments and len(self.cur_comments_)):
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_))
@@ -1369,8 +1367,8 @@ class Parser(object):
return self.cur_token_
raise FeatureLibError("Expected a string", self.cur_token_location_)
def advance_lexer_(self, comments = False):
if not self.ignore_comments and comments and len(self.cur_comments_):
def advance_lexer_(self, comments=False):
if comments and self.cur_comments_:
self.cur_token_type_ = Lexer.COMMENT
self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
return

View File

@@ -138,27 +138,21 @@ class BuilderTest(unittest.TestCase):
def check_fea2fea_file(self, name, base=None, parser=Parser):
font = makeTTFont()
fname = (name + ".fea") if '.' not in name else name
temp = parser.ignore_comments
parser.ignore_comments = False
try:
p = parser(self.getpath(fname), glyphMap=font.getReverseGlyphMap())
doc = p.parse()
actual = self.normal_fea(doc.asFea().split("\n"))
p = parser(self.getpath(fname), glyphMap=font.getReverseGlyphMap())
doc = p.parse()
actual = self.normal_fea(doc.asFea().split("\n"))
with open(self.getpath(base or fname), "r", encoding="utf-8") as ofile:
expected = self.normal_fea(ofile.readlines())
with open(self.getpath(base or fname), "r", encoding="utf-8") as ofile:
expected = self.normal_fea(ofile.readlines())
if expected != actual:
fname = name.rsplit(".", 1)[0] + ".fea"
for line in difflib.unified_diff(
expected, actual,
fromfile=fname + " (expected)",
tofile=fname + " (actual)"):
sys.stderr.write(line+"\n")
self.fail("Fea2Fea output is different from expected. "
"Generated:\n{}\n".format("\n".join(actual)))
finally:
parser.ignore_comments = temp
if expected != actual:
fname = name.rsplit(".", 1)[0] + ".fea"
for line in difflib.unified_diff(
expected, actual,
fromfile=fname + " (expected)",
tofile=fname + " (actual)"):
sys.stderr.write(line+"\n")
self.fail("Fea2Fea output is different from expected. "
"Generated:\n{}\n".format("\n".join(actual)))
def normal_fea(self, lines):
output = []

View File

@@ -1246,7 +1246,7 @@ class ParserTest(unittest.TestCase):
def test_substitute_lookups(self): # GSUB LookupType 6
doc = Parser(self.getpath("spec5fi1.fea"), GLYPHMAP).parse()
[langsys, ligs, sub, feature] = doc.statements
[_, _, _, langsys, ligs, sub, feature] = doc.statements
self.assertEqual(feature.statements[0].lookups, [ligs, None, sub])
self.assertEqual(feature.statements[1].lookups, [ligs, None, sub])
@@ -1463,7 +1463,6 @@ class ParserTest(unittest.TestCase):
def parse(self, text, glyphMap=GLYPHMAP):
featurefile = UnicodeIO(text)
p = Parser(featurefile, glyphMap)
p.ignore_comments = False
return p.parse()
@staticmethod