2015-08-01 12:35:22 +02:00
|
|
|
from __future__ import print_function, division, absolute_import
|
|
|
|
from __future__ import unicode_literals
|
2015-08-21 17:09:46 +02:00
|
|
|
from fontTools.feaLib.error import FeatureLibError
|
2015-08-01 12:35:22 +02:00
|
|
|
from fontTools.feaLib.lexer import Lexer, IncludingLexer
|
2015-08-01 14:49:19 +02:00
|
|
|
import fontTools.feaLib.ast as ast
|
2015-08-01 17:34:02 +02:00
|
|
|
import os
|
|
|
|
import re
|
2015-08-01 14:49:19 +02:00
|
|
|
|
2015-08-01 12:35:22 +02:00
|
|
|
|
|
|
|
class Parser(object):
|
|
|
|
def __init__(self, path):
    """Create a feature-file parser reading from the file at *path*."""
    # The parsed AST accumulates here.
    self.doc_ = ast.FeatureFile()
    # One namespace per kind of named definition.
    self.anchors_ = SymbolTable()
    self.glyphclasses_ = SymbolTable()
    self.lookups_ = SymbolTable()
    self.valuerecords_ = SymbolTable()
    self.symbol_tables_ = {
        self.anchors_,
        self.glyphclasses_,
        self.lookups_,
        self.valuerecords_,
    }
    # One-token lookahead state; primed by the first advance_lexer_() call.
    self.next_token_type_ = None
    self.next_token_ = None
    self.next_token_location_ = None
    self.lexer_ = IncludingLexer(path)
    self.advance_lexer_()
|
|
|
|
|
|
|
|
def parse(self):
    """Parse the feature file and return the resulting ast.FeatureFile.

    Consumes top-level statements until the lexer is exhausted;
    raises FeatureLibError on any unrecognized top-level token.
    """
    statements = self.doc_.statements
    # next_token_type_ becomes None once the lexer has no more tokens.
    while self.next_token_type_ is not None:
        self.advance_lexer_()
        if self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_("languagesystem"):
            statements.append(self.parse_languagesystem_())
        elif self.is_cur_keyword_("lookup"):
            # Top-level context is horizontal; vertical applies inside
            # vertical-metric features only.
            statements.append(self.parse_lookup_(vertical=False))
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_block_())
        elif self.is_cur_keyword_("table"):
            statements.append(self.parse_table_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(
                self.parse_valuerecord_definition_(vertical=False))
        else:
            raise FeatureLibError(
                "Expected feature, languagesystem, lookup, markClass, "
                "table, or glyph class definition",
                self.cur_token_location_)
    return self.doc_
|
|
|
|
|
2015-12-07 22:48:10 +01:00
|
|
|
def parse_anchor_(self):
    """Parse an "<anchor ...>" element and return an ast.Anchor.

    Accepts three forms: "<anchor NULL>" (returns None), a named
    reference to a previous anchorDef, or explicit coordinates with
    optional contourpoint and device tables.
    """
    self.expect_symbol_("<")
    self.expect_keyword_("anchor")
    location = self.cur_token_location_

    # Form 1: <anchor NULL> — an explicitly absent anchor.
    if self.next_token_ == "NULL":
        self.expect_keyword_("NULL")
        self.expect_symbol_(">")
        return None

    # Form 2: <anchor NAME> — reference to an earlier anchorDef.
    if self.next_token_type_ == Lexer.NAME:
        name = self.expect_name_()
        anchordef = self.anchors_.resolve(name)
        if anchordef is None:
            raise FeatureLibError(
                'Unknown anchor "%s"' % name,
                self.cur_token_location_)
        self.expect_symbol_(">")
        return ast.Anchor(location, anchordef.x, anchordef.y,
                          anchordef.contourpoint,
                          xDeviceTable=None, yDeviceTable=None)

    # Form 3: explicit coordinates.
    x, y = self.expect_number_(), self.expect_number_()

    contourpoint = None
    if self.next_token_ == "contourpoint":
        self.expect_keyword_("contourpoint")
        contourpoint = self.expect_number_()

    # Optional pair of <device ...> records for x and y.
    if self.next_token_ == "<":
        xDeviceTable = self.parse_device_()
        yDeviceTable = self.parse_device_()
    else:
        xDeviceTable, yDeviceTable = None, None

    self.expect_symbol_(">")
    return ast.Anchor(location, x, y, contourpoint,
                      xDeviceTable, yDeviceTable)
|
|
|
|
|
2015-12-08 23:46:04 +01:00
|
|
|
def parse_anchor_marks_(self):
    """Parses a sequence of [<anchor> mark @MARKCLASS]*.

    Returns a list of (ast.Anchor, markClass) pairs; the anchor is
    None for "<anchor NULL>" entries that still carry a mark.
    """
    anchorMarks = []  # [(ast.Anchor, markClassName)*]
    while self.next_token_ == "<":
        anchor = self.parse_anchor_()
        if anchor is None and self.next_token_ != "mark":
            continue  # <anchor NULL> without mark, eg. in GPOS type 5
        self.expect_keyword_("mark")
        markClass = self.expect_markClass_reference_()
        anchorMarks.append((anchor, markClass))
    return anchorMarks
|
|
|
|
|
2015-08-11 12:53:30 +02:00
|
|
|
def parse_anchordef_(self):
    """Parse "anchorDef <x> <y> [contourpoint <n>] <name>;" and register it."""
    assert self.is_cur_keyword_("anchorDef")
    location = self.cur_token_location_
    x = self.expect_number_()
    y = self.expect_number_()
    point = None
    if self.next_token_ == "contourpoint":
        self.expect_keyword_("contourpoint")
        point = self.expect_number_()
    name = self.expect_name_()
    self.expect_symbol_(";")
    definition = ast.AnchorDefinition(location, name, x, y, point)
    # Register so later "<anchor NAME>" references can resolve it.
    self.anchors_.define(name, definition)
    return definition
|
|
|
|
|
2015-12-07 17:18:18 +01:00
|
|
|
def parse_enumerate_(self, vertical):
    """Consume an "enum"/"enumerate" keyword, then parse the positioning
    rule that follows it with the enumerated flag set."""
    assert self.cur_token_ in ("enumerate", "enum")
    self.advance_lexer_()
    return self.parse_position_(enumerated=True, vertical=vertical)
|
|
|
|
|
2015-08-01 17:34:02 +02:00
|
|
|
def parse_glyphclass_definition_(self):
    """Parse "@NAME = [glyphs];" and register the named glyph class."""
    location, name = self.cur_token_location_, self.cur_token_
    self.expect_symbol_("=")
    glyphs = self.parse_glyphclass_(accept_glyphname=False).glyphSet()
    self.expect_symbol_(";")
    # Redefining a glyph class is an error in the feature file language.
    if self.glyphclasses_.resolve(name) is not None:
        raise FeatureLibError(
            "Glyph class @%s already defined" % name, location)
    definition = ast.GlyphClassDefinition(location, name, glyphs)
    self.glyphclasses_.define(name, definition)
    return definition
|
2015-08-01 17:34:02 +02:00
|
|
|
|
2015-08-04 19:55:55 +02:00
|
|
|
def parse_glyphclass_(self, accept_glyphname):
    """Parse a glyph-class expression and return the matching AST node.

    Accepts a bare glyph name (only when *accept_glyphname* is true),
    a @ClassName reference, or a bracketed class such as
    "[a b c-f @OTHER]".  Raises FeatureLibError for unknown class
    references or unexpected tokens inside brackets.
    """
    if accept_glyphname and self.next_token_type_ is Lexer.NAME:
        return ast.GlyphName(self.cur_token_location_, self.expect_name_())
    if self.next_token_type_ is Lexer.GLYPHCLASS:
        self.advance_lexer_()
        gc = self.glyphclasses_.resolve(self.cur_token_)
        if gc is None:
            raise FeatureLibError(
                "Unknown glyph class @%s" % self.cur_token_,
                self.cur_token_location_)
        # Mark classes and plain glyph classes get distinct wrapper nodes.
        if isinstance(gc, ast.MarkClass):
            return ast.MarkClassName(self.cur_token_location_, gc)
        else:
            return ast.GlyphClassName(self.cur_token_location_, gc)

    # Bracketed form: accumulate glyphs, ranges, and nested class refs.
    self.expect_symbol_("[")
    glyphs = set()
    location = self.cur_token_location_
    while self.next_token_ != "]":
        self.advance_lexer_()
        if self.cur_token_type_ is Lexer.NAME:
            if self.next_token_ == "-":
                # Glyph range, eg. "a - f".
                range_location_ = self.cur_token_location_
                range_start = self.cur_token_
                self.expect_symbol_("-")
                range_end = self.expect_name_()
                glyphs.update(self.make_glyph_range_(range_location_,
                                                     range_start,
                                                     range_end))
            else:
                glyphs.add(self.cur_token_)
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            # Nested class reference is flattened into the set.
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_)
            glyphs.update(gc.glyphSet())
        else:
            raise FeatureLibError(
                "Expected glyph name, glyph range, "
                "or glyph class reference",
                self.cur_token_location_)
    self.expect_symbol_("]")
    return ast.GlyphClass(location, glyphs)
|
2015-08-01 17:34:02 +02:00
|
|
|
|
2015-12-11 16:28:01 +01:00
|
|
|
def parse_class_name_(self):
    """Parse a @ClassName reference and wrap it in the proper AST node."""
    name = self.expect_class_name_()
    resolved = self.glyphclasses_.resolve(name)
    if resolved is None:
        raise FeatureLibError(
            "Unknown glyph class @%s" % name,
            self.cur_token_location_)
    # Mark classes get a distinct wrapper node from plain glyph classes.
    if isinstance(resolved, ast.MarkClass):
        wrapper = ast.MarkClassName
    else:
        wrapper = ast.GlyphClassName
    return wrapper(self.cur_token_location_, resolved)
|
2015-12-10 13:04:16 +01:00
|
|
|
|
2015-08-05 10:41:04 +02:00
|
|
|
def parse_glyph_pattern_(self):
    """Parse a glyph sequence with optional ' marks and lookup references.

    Returns (prefix, glyphs, lookups, suffix): *glyphs* are the marked
    positions, *prefix*/*suffix* the unmarked context around them, and
    *lookups* holds one entry (lookup or None) per marked glyph.  When
    nothing is marked, the whole sequence is returned in the *glyphs*
    slot with a matching list of None lookups.
    """
    prefix, glyphs, lookups, suffix = ([], [], [], [])
    # Stop at rule keywords/terminators or a value record number.
    while (self.next_token_ not in {"by", "from", ";", "<"} and
           self.next_token_type_ != Lexer.NUMBER):
        gc = self.parse_glyphclass_(accept_glyphname=True)
        marked = False
        if self.next_token_ == "'":
            self.expect_symbol_("'")
            marked = True
        if marked:
            glyphs.append(gc)
        elif glyphs:
            # Unmarked glyphs after the first marked one form the suffix.
            suffix.append(gc)
        else:
            prefix.append(gc)

        # Optional "lookup NAME" immediately after a marked glyph.
        lookup = None
        if self.next_token_ == "lookup":
            self.expect_keyword_("lookup")
            if not marked:
                raise FeatureLibError(
                    "Lookups can only follow marked glyphs",
                    self.cur_token_location_)
            lookup_name = self.expect_name_()
            lookup = self.lookups_.resolve(lookup_name)
            if lookup is None:
                raise FeatureLibError(
                    'Unknown lookup "%s"' % lookup_name,
                    self.cur_token_location_)
        if marked:
            # Keep lookups aligned with marked glyphs, padding with None.
            lookups.append(lookup)

    if not glyphs and not suffix:  # eg., "sub f f i by"
        assert lookups == []
        # Nothing was marked: treat the whole run as the glyph sequence.
        return ([], prefix, [None] * len(prefix), [])
    else:
        return (prefix, glyphs, lookups, suffix)
|
2015-08-05 10:41:04 +02:00
|
|
|
|
|
|
|
def parse_ignore_(self):
    """Parse an "ignore sub ..." statement; only substitution is supported."""
    assert self.is_cur_keyword_("ignore")
    location = self.cur_token_location_
    self.advance_lexer_()
    if self.cur_token_ not in ("substitute", "sub"):
        raise FeatureLibError(
            'Expected "substitute"', self.next_token_location_)
    prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_()
    self.expect_symbol_(";")
    return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix)
|
2015-08-05 10:41:04 +02:00
|
|
|
|
2015-08-10 16:30:10 +02:00
|
|
|
def parse_language_(self):
    """Parse "language <tag> [exclude_dflt|include_dflt] [required];"."""
    assert self.is_cur_keyword_("language")
    location = self.cur_token_location_
    language = self.expect_language_tag_()
    # Defaults: include dflt rules, language not required.
    include_default = True
    required = False
    if self.next_token_ in ("exclude_dflt", "include_dflt"):
        include_default = self.expect_name_() == "include_dflt"
    if self.next_token_ == "required":
        self.expect_keyword_("required")
        required = True
    self.expect_symbol_(";")
    return ast.LanguageStatement(location, language,
                                 include_default, required)
|
|
|
|
|
2016-01-07 16:39:35 +01:00
|
|
|
def parse_ligatureCaretByPos_(self):
    """Parse "LigatureCaretByPos <glyphs> <num>+;" inside a GDEF block."""
    assert self.is_cur_keyword_("LigatureCaretByPos")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    # At least one caret position is required; collect any further ones.
    carets = set()
    carets.add(self.expect_number_())
    while self.next_token_ != ";":
        carets.add(self.expect_number_())
    self.expect_symbol_(";")
    return ast.LigatureCaretByPosStatement(location, glyphs, carets)
|
|
|
|
|
2015-08-11 10:59:26 +02:00
|
|
|
def parse_lookup_(self, vertical):
    """Parse either a "lookup NAME;" reference or a lookup block.

    A trailing ";" right after the name means a reference to a
    previously defined lookup; otherwise a full
    "lookup NAME [useExtension] { ... } NAME;" block follows.
    """
    assert self.is_cur_keyword_("lookup")
    location, name = self.cur_token_location_, self.expect_name_()

    # Reference form: "lookup NAME;"
    if self.next_token_ == ";":
        lookup = self.lookups_.resolve(name)
        if lookup is None:
            raise FeatureLibError("Unknown lookup \"%s\"" % name,
                                  self.cur_token_location_)
        self.expect_symbol_(";")
        return ast.LookupReferenceStatement(location, lookup)

    # Definition form, with optional useExtension flag.
    use_extension = False
    if self.next_token_ == "useExtension":
        self.expect_keyword_("useExtension")
        use_extension = True

    block = ast.LookupBlock(location, name, use_extension)
    self.parse_block_(block, vertical)
    # Register so later "lookup NAME;" references resolve to this block.
    self.lookups_.define(name, block)
    return block
|
|
|
|
|
2015-12-10 13:04:16 +01:00
|
|
|
def parse_lookupflag_(self):
    """Parse a "lookupflag ...;" statement (numeric or named format).

    Returns an ast.LookupFlagStatement; raises FeatureLibError on
    repeated or unrecognized flag names.
    """
    assert self.is_cur_keyword_("lookupflag")
    location = self.cur_token_location_

    # format B: "lookupflag 6;"
    if self.next_token_type_ == Lexer.NUMBER:
        value = self.expect_number_()
        self.expect_symbol_(";")
        return ast.LookupFlagStatement(location, value, None, None)

    # format A: "lookupflag RightToLeft MarkAttachmentType @M;"
    value, markAttachment, markFilteringSet = 0, None, None
    # Bit values per the OpenType LookupFlag field.
    flags = {
        "RightToLeft": 1, "IgnoreBaseGlyphs": 2,
        "IgnoreLigatures": 4, "IgnoreMarks": 8
    }
    seen = set()
    while self.next_token_ != ";":
        # Each flag name may appear at most once.
        if self.next_token_ in seen:
            raise FeatureLibError(
                "%s can be specified only once" % self.next_token_,
                self.next_token_location_)
        seen.add(self.next_token_)
        if self.next_token_ == "MarkAttachmentType":
            self.expect_keyword_("MarkAttachmentType")
            markAttachment = self.parse_class_name_()
        elif self.next_token_ == "UseMarkFilteringSet":
            self.expect_keyword_("UseMarkFilteringSet")
            markFilteringSet = self.parse_class_name_()
        elif self.next_token_ in flags:
            value = value | flags[self.expect_name_()]
        else:
            raise FeatureLibError(
                '"%s" is not a recognized lookupflag' % self.next_token_,
                self.next_token_location_)
    self.expect_symbol_(";")
    return ast.LookupFlagStatement(location, value,
                                   markAttachment, markFilteringSet)
|
|
|
|
|
2015-12-08 17:04:21 +01:00
|
|
|
def parse_markClass_(self):
    """Parse "markClass <glyphs> <anchor> @NAME;".

    Mark classes accumulate across statements: the first definition of
    a name creates the ast.MarkClass, later ones add to it.
    """
    assert self.is_cur_keyword_("markClass")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    anchor = self.parse_anchor_()
    name = self.expect_class_name_()
    self.expect_symbol_(";")
    markClass = self.doc_.markClasses.get(name)
    if markClass is None:
        # First definition: create and register the class.
        markClass = ast.MarkClass(name)
        self.doc_.markClasses[name] = markClass
        self.glyphclasses_.define(name, markClass)
    mcdef = ast.MarkClassDefinition(location, markClass, anchor, glyphs)
    markClass.addDefinition(mcdef)
    return mcdef
|
2015-12-08 17:04:21 +01:00
|
|
|
|
2015-12-07 17:18:18 +01:00
|
|
|
def is_next_glyphclass_(self):
    """Return True if the upcoming token can start a glyph class."""
    if self.next_token_ == "[":
        return True
    return self.next_token_type_ in (Lexer.GLYPHCLASS, Lexer.NAME)
|
|
|
|
|
|
|
|
def parse_position_(self, enumerated, vertical):
    """Parse a "pos"/"position" rule and dispatch on its GPOS type.

    Attachment keywords (cursive/base/ligature/mark) are delegated;
    otherwise distinguishes single (type 1), pair (type 2, formats A
    and B), and chaining contextual (type 8) positioning.
    """
    assert self.cur_token_ in {"position", "pos"}
    if self.next_token_ == "cursive":  # GPOS type 3
        return self.parse_position_cursive_(enumerated, vertical)
    elif self.next_token_ == "base":   # GPOS type 4
        return self.parse_position_base_(enumerated, vertical)
    elif self.next_token_ == "ligature":   # GPOS type 5
        return self.parse_position_ligature_(enumerated, vertical)
    elif self.next_token_ == "mark":   # GPOS type 6
        return self.parse_position_mark_(enumerated, vertical)

    location = self.cur_token_location_
    prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_()
    gc2, value2 = None, None
    if not prefix and len(glyphs) == 2 and not suffix and not any(lookups):
        # Pair positioning, format B: 'pos' glyphs gc2 value1
        gc2 = glyphs[1]
        glyphs = [glyphs[0]]

    if prefix or len(glyphs) > 1 or suffix or any(lookups):
        # GPOS type 8: Chaining contextual positioning
        self.expect_symbol_(";")
        return ast.ChainContextPosStatement(
            location, prefix, glyphs, suffix, lookups)

    value1 = self.parse_valuerecord_(vertical)
    if self.next_token_ != ";" and gc2 is None:
        # Pair positioning, format A: 'pos' gc1 value1 gc2 value2
        gc2 = self.parse_glyphclass_(accept_glyphname=True)
        value2 = self.parse_valuerecord_(vertical)
    self.expect_symbol_(";")

    if gc2 is None:
        # Single positioning; "enum" only makes sense for pairs.
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is only allowed with pair positionings',
                self.cur_token_location_)
        return ast.SinglePosStatement(location, glyphs[0], value1)
    else:
        return ast.PairPosStatement(location, enumerated,
                                    glyphs[0], value1, gc2, value2)
|
2015-12-04 11:16:43 +01:00
|
|
|
|
2015-12-08 23:46:04 +01:00
|
|
|
def parse_position_cursive_(self, enumerated, vertical):
    """Parse GPOS type 3: "position cursive <glyphs> <entry> <exit>;"."""
    location = self.cur_token_location_
    self.expect_keyword_("cursive")
    # "enum" is invalid for cursive attachment.
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            'cursive attachment positioning',
            location)
    glyphclass = self.parse_glyphclass_(accept_glyphname=True).glyphSet()
    entryAnchor = self.parse_anchor_()
    exitAnchor = self.parse_anchor_()
    self.expect_symbol_(";")
    return ast.CursivePosStatement(
        location, glyphclass, entryAnchor, exitAnchor)
|
|
|
|
|
|
|
|
def parse_position_base_(self, enumerated, vertical):
    """Parse GPOS type 4: "position base <glyphs> <anchor> mark @M ...;"."""
    location = self.cur_token_location_
    self.expect_keyword_("base")
    # "enum" is invalid for mark-to-base attachment.
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            'mark-to-base attachment positioning', location)
    base_glyphs = self.parse_glyphclass_(accept_glyphname=True).glyphSet()
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return ast.MarkBasePosStatement(location, base_glyphs, anchor_marks)
|
2015-12-08 23:46:04 +01:00
|
|
|
|
2015-12-09 10:40:49 +01:00
|
|
|
def parse_position_ligature_(self, enumerated, vertical):
    """Parse GPOS type 5: mark-to-ligature attachment positioning.

    Anchor marks are collected per ligature component, separated by
    the "ligComponent" keyword.
    """
    location = self.cur_token_location_
    self.expect_keyword_("ligature")
    # "enum" is invalid for mark-to-ligature attachment.
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            'mark-to-ligature attachment positioning',
            location)
    ligatures = self.parse_glyphclass_(accept_glyphname=True).glyphSet()
    # One list of (anchor, markClass) pairs per ligature component.
    marks = [self.parse_anchor_marks_()]
    while self.next_token_ == "ligComponent":
        self.expect_keyword_("ligComponent")
        marks.append(self.parse_anchor_marks_())
    self.expect_symbol_(";")
    return ast.MarkLigPosStatement(location, ligatures, marks)
|
2015-12-09 10:40:49 +01:00
|
|
|
|
2015-12-09 17:14:13 +01:00
|
|
|
def parse_position_mark_(self, enumerated, vertical):
    """Parse GPOS type 6: "position mark <glyphs> <anchor> mark @M ...;"."""
    location = self.cur_token_location_
    self.expect_keyword_("mark")
    # "enum" is invalid for mark-to-mark attachment.
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            'mark-to-mark attachment positioning', location)
    base_marks = self.parse_glyphclass_(accept_glyphname=True).glyphSet()
    attachments = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return ast.MarkMarkPosStatement(location, base_marks, attachments)
|
|
|
|
|
2015-08-10 11:30:47 +02:00
|
|
|
def parse_script_(self):
    """Parse a "script <tag>;" statement."""
    assert self.is_cur_keyword_("script")
    location = self.cur_token_location_
    script = self.expect_script_tag_()
    self.expect_symbol_(";")
    return ast.ScriptStatement(location, script)
|
|
|
|
|
2015-08-05 10:41:04 +02:00
|
|
|
def parse_substitute_(self):
    """Parse a GSUB rule introduced by "sub"/"substitute" or
    "rsub"/"reversesub".

    Dispatches on the parsed shape to the matching GSUB lookup type:
    single (1), multiple (2), alternate (3), ligature (4), chaining
    contextual (6), or reverse chaining single (8).  Raises
    FeatureLibError on malformed rules.
    """
    assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
    location = self.cur_token_location_
    reverse = self.cur_token_ in {"reversesub", "rsub"}
    old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_()

    new = []
    if self.next_token_ == "by":
        keyword = self.expect_keyword_("by")
        while self.next_token_ != ";":
            gc = self.parse_glyphclass_(accept_glyphname=True)
            new.append(gc)
    elif self.next_token_ == "from":
        keyword = self.expect_keyword_("from")
        new = [self.parse_glyphclass_(accept_glyphname=False)]
    else:
        keyword = None
    self.expect_symbol_(";")
    # BUGFIX: was "len(new) is 0" -- an identity comparison against an
    # int literal, which only works by accident of CPython's small-int
    # caching and is implementation-dependent.  Use equality instead.
    if len(new) == 0 and not any(lookups):
        raise FeatureLibError(
            'Expected "by", "from" or explicit lookup references',
            self.cur_token_location_)

    # GSUB lookup type 3: Alternate substitution.
    # Format: "substitute a from [a.1 a.2 a.3];"
    if keyword == "from":
        if reverse:
            raise FeatureLibError(
                'Reverse chaining substitutions do not support "from"',
                location)
        if len(old) != 1 or len(old[0].glyphSet()) != 1:
            raise FeatureLibError(
                'Expected a single glyph before "from"',
                location)
        if len(new) != 1:
            raise FeatureLibError(
                'Expected a single glyphclass after "from"',
                location)
        return ast.AlternateSubstStatement(
            location, old_prefix, old[0], old_suffix, new[0])

    num_lookups = len([l for l in lookups if l is not None])

    # GSUB lookup type 1: Single substitution.
    # Format A: "substitute a by a.sc;"
    # Format B: "substitute [one.fitted one.oldstyle] by one;"
    # Format C: "substitute [a-d] by [A.sc-D.sc];"
    if (not reverse and len(old) == 1 and len(new) == 1 and
            num_lookups == 0):
        glyphs = sorted(list(old[0].glyphSet()))
        replacements = sorted(list(new[0].glyphSet()))
        if len(replacements) == 1:
            # A single replacement glyph applies to every input glyph.
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                'but found a glyph class with %d elements' %
                (len(glyphs), len(replacements)), location)
        return ast.SingleSubstStatement(location,
                                        dict(zip(glyphs, replacements)),
                                        old_prefix, old_suffix)

    # GSUB lookup type 2: Multiple substitution.
    # Format: "substitute f_f_i by f f i;"
    if (not reverse and
            len(old) == 1 and len(old[0].glyphSet()) == 1 and
            len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1 and
            num_lookups == 0):
        return ast.MultipleSubstStatement(
            location, old_prefix, tuple(old[0].glyphSet())[0], old_suffix,
            tuple([list(n.glyphSet())[0] for n in new]))

    # GSUB lookup type 4: Ligature substitution.
    # Format: "substitute f f i by f_f_i;"
    if (not reverse and
            len(old) > 1 and len(new) == 1 and
            len(new[0].glyphSet()) == 1 and
            num_lookups == 0):
        return ast.LigatureSubstStatement(
            location, old_prefix, old, old_suffix,
            list(new[0].glyphSet())[0])

    # GSUB lookup type 8: Reverse chaining substitution.
    if reverse:
        if len(old) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                "only a single glyph or glyph class can be replaced",
                location)
        if len(new) != 1:
            raise FeatureLibError(
                'In reverse chaining single substitutions, '
                'the replacement (after "by") must be a single glyph '
                'or glyph class', location)
        if num_lookups != 0:
            raise FeatureLibError(
                "Reverse chaining substitutions cannot call named lookups",
                location)
        glyphs = sorted(list(old[0].glyphSet()))
        replacements = sorted(list(new[0].glyphSet()))
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                'but found a glyph class with %d elements' %
                (len(glyphs), len(replacements)), location)
        return ast.ReverseChainSingleSubstStatement(
            location, old_prefix, old_suffix,
            dict(zip(glyphs, replacements)))

    # GSUB lookup type 6: Chaining contextual substitution.
    assert len(new) == 0, new
    rule = ast.ChainContextSubstStatement(
        location, old_prefix, old, old_suffix, lookups)
    return rule
|
|
|
|
|
2015-08-11 15:14:47 +02:00
|
|
|
def parse_subtable_(self):
    """Parse a "subtable;" break statement."""
    assert self.is_cur_keyword_("subtable")
    location = self.cur_token_location_
    self.expect_symbol_(";")
    return ast.SubtableStatement(location)
|
|
|
|
|
2016-01-07 16:39:35 +01:00
|
|
|
def parse_table_(self):
    """Parse a "table TAG { ... } TAG;" block.

    Only tables with a registered handler (currently GDEF) are
    supported; the closing tag must match the opening one.
    """
    assert self.is_cur_keyword_("table")
    location, name = self.cur_token_location_, self.expect_tag_()
    table = ast.TableBlock(location, name)
    self.expect_symbol_("{")
    # Per-table body parsers; unknown tables are rejected below.
    handler = {
        "GDEF": self.parse_table_GDEF_,
    }.get(name)
    if handler:
        handler(table)
    else:
        raise FeatureLibError('"table %s" is not supported' % name.strip(),
                              location)
    self.expect_symbol_("}")
    end_tag = self.expect_tag_()
    # The feature file syntax repeats the tag after the closing brace.
    if end_tag != name:
        raise FeatureLibError('Expected "%s"' % name.strip(),
                              self.cur_token_location_)
    self.expect_symbol_(";")
    return table
|
|
|
|
|
|
|
|
def parse_table_GDEF_(self, table):
    """Fill in the body of a "table GDEF { ... }" block."""
    stmts = table.statements
    while self.next_token_ != "}":
        self.advance_lexer_()
        # LigatureCaretByPos is the only GDEF statement supported here.
        if not self.is_cur_keyword_("LigatureCaretByPos"):
            raise FeatureLibError(
                "Expected LigatureCaretByPos",
                self.cur_token_location_)
        stmts.append(self.parse_ligatureCaretByPos_())
|
|
|
|
|
2015-12-04 17:10:20 +01:00
|
|
|
def parse_device_(self):
    """Parse a "<device ...>" record.

    Returns None for "<device NULL>", otherwise a tuple of
    (size, delta) number pairs.
    """
    self.expect_symbol_("<")
    self.expect_keyword_("device")
    if self.next_token_ == "NULL":
        self.expect_keyword_("NULL")
        deltas = None
    else:
        pairs = [(self.expect_number_(), self.expect_number_())]
        while self.next_token_ == ",":
            self.expect_symbol_(",")
            pairs.append((self.expect_number_(), self.expect_number_()))
        deltas = tuple(pairs)  # tuple so the record is hashable
    self.expect_symbol_(">")
    return deltas
|
|
|
|
|
2015-08-04 11:01:04 +02:00
|
|
|
def parse_valuerecord_(self, vertical):
|
|
|
|
if self.next_token_type_ is Lexer.NUMBER:
|
|
|
|
number, location = self.expect_number_(), self.cur_token_location_
|
|
|
|
if vertical:
|
2015-12-04 17:10:20 +01:00
|
|
|
val = ast.ValueRecord(location, 0, 0, 0, number,
|
|
|
|
None, None, None, None)
|
2015-08-04 11:01:04 +02:00
|
|
|
else:
|
2015-12-04 17:10:20 +01:00
|
|
|
val = ast.ValueRecord(location, 0, 0, number, 0,
|
|
|
|
None, None, None, None)
|
2015-08-04 11:01:04 +02:00
|
|
|
return val
|
|
|
|
self.expect_symbol_("<")
|
|
|
|
location = self.cur_token_location_
|
|
|
|
if self.next_token_type_ is Lexer.NAME:
|
|
|
|
name = self.expect_name_()
|
2015-12-07 17:18:18 +01:00
|
|
|
if name == "NULL":
|
|
|
|
self.expect_symbol_(">")
|
|
|
|
return None
|
2015-08-04 11:01:04 +02:00
|
|
|
vrd = self.valuerecords_.resolve(name)
|
|
|
|
if vrd is None:
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Unknown valueRecordDef \"%s\"" % name,
|
|
|
|
self.cur_token_location_)
|
2015-08-04 11:01:04 +02:00
|
|
|
value = vrd.value
|
|
|
|
xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
|
|
|
|
xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
|
|
|
|
else:
|
|
|
|
xPlacement, yPlacement, xAdvance, yAdvance = (
|
|
|
|
self.expect_number_(), self.expect_number_(),
|
|
|
|
self.expect_number_(), self.expect_number_())
|
2015-12-04 17:10:20 +01:00
|
|
|
|
|
|
|
if self.next_token_ == "<":
|
2015-12-04 15:49:04 +01:00
|
|
|
xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
|
2015-12-04 17:10:20 +01:00
|
|
|
self.parse_device_(), self.parse_device_(),
|
|
|
|
self.parse_device_(), self.parse_device_())
|
2015-12-05 08:15:05 +00:00
|
|
|
allDeltas = sorted([
|
|
|
|
delta
|
|
|
|
for size, delta
|
|
|
|
in (xPlaDevice if xPlaDevice else ()) +
|
|
|
|
(yPlaDevice if yPlaDevice else ()) +
|
|
|
|
(xAdvDevice if xAdvDevice else ()) +
|
|
|
|
(yAdvDevice if yAdvDevice else ())])
|
|
|
|
if allDeltas[0] < -128 or allDeltas[-1] > 127:
|
|
|
|
raise FeatureLibError(
|
|
|
|
"Device value out of valid range (-128..127)",
|
|
|
|
self.cur_token_location_)
|
2015-12-04 15:49:04 +01:00
|
|
|
else:
|
2015-12-04 17:10:20 +01:00
|
|
|
xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
|
|
|
|
None, None, None, None)
|
|
|
|
|
2015-08-04 11:01:04 +02:00
|
|
|
self.expect_symbol_(">")
|
|
|
|
return ast.ValueRecord(
|
2015-12-04 15:49:04 +01:00
|
|
|
location, xPlacement, yPlacement, xAdvance, yAdvance,
|
|
|
|
xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice)
|
2015-08-04 11:01:04 +02:00
|
|
|
|
|
|
|
def parse_valuerecord_definition_(self, vertical):
|
|
|
|
assert self.is_cur_keyword_("valueRecordDef")
|
|
|
|
location = self.cur_token_location_
|
|
|
|
value = self.parse_valuerecord_(vertical)
|
|
|
|
name = self.expect_name_()
|
|
|
|
self.expect_symbol_(";")
|
|
|
|
vrd = ast.ValueRecordDefinition(location, name, value)
|
|
|
|
self.valuerecords_.define(name, vrd)
|
|
|
|
return vrd
|
|
|
|
|
2015-08-01 12:35:22 +02:00
|
|
|
def parse_languagesystem_(self):
|
2015-08-01 19:58:54 +02:00
|
|
|
assert self.cur_token_ == "languagesystem"
|
2015-08-01 14:49:19 +02:00
|
|
|
location = self.cur_token_location_
|
2015-09-08 09:26:24 +02:00
|
|
|
script = self.expect_script_tag_()
|
|
|
|
language = self.expect_language_tag_()
|
2015-08-01 12:35:22 +02:00
|
|
|
self.expect_symbol_(";")
|
2015-08-21 21:14:06 +02:00
|
|
|
if script == "DFLT" and language != "dflt":
|
|
|
|
raise FeatureLibError(
|
|
|
|
'For script "DFLT", the language must be "dflt"',
|
|
|
|
self.cur_token_location_)
|
2015-08-11 12:55:09 +02:00
|
|
|
return ast.LanguageSystemStatement(location, script, language)
|
2015-08-01 12:35:22 +02:00
|
|
|
|
2015-08-01 19:58:54 +02:00
|
|
|
def parse_feature_block_(self):
|
|
|
|
assert self.cur_token_ == "feature"
|
|
|
|
location = self.cur_token_location_
|
|
|
|
tag = self.expect_tag_()
|
2016-01-07 12:31:28 +01:00
|
|
|
vertical = (tag in {"vkrn", "vpal", "vhal", "valt"})
|
2015-08-11 15:28:59 +02:00
|
|
|
|
|
|
|
use_extension = False
|
|
|
|
if self.next_token_ == "useExtension":
|
|
|
|
self.expect_keyword_("useExtension")
|
|
|
|
use_extension = True
|
|
|
|
|
|
|
|
block = ast.FeatureBlock(location, tag, use_extension)
|
2015-08-11 10:19:39 +02:00
|
|
|
self.parse_block_(block, vertical)
|
|
|
|
return block
|
2015-08-03 09:37:27 +02:00
|
|
|
|
2015-08-11 10:19:39 +02:00
|
|
|
    def parse_block_(self, block, vertical):
        """Parse the "{ ... } name;" body of a feature (or similar) block.

        Statements are appended to block.statements.  *vertical* is
        forwarded to rules whose value records depend on writing
        direction.  Raises FeatureLibError on unknown statements or a
        mismatched closing name.
        """
        self.expect_symbol_("{")
        # Names defined inside the block must not leak out of it.
        for symtab in self.symbol_tables_:
            symtab.enter_scope()

        statements = block.statements
        while self.next_token_ != "}":
            self.advance_lexer_()
            # Dispatch on the statement's leading keyword (or @class).
            if self.cur_token_type_ is Lexer.GLYPHCLASS:
                statements.append(self.parse_glyphclass_definition_())
            elif self.is_cur_keyword_("anchorDef"):
                statements.append(self.parse_anchordef_())
            elif self.is_cur_keyword_({"enum", "enumerate"}):
                statements.append(self.parse_enumerate_(vertical=vertical))
            elif self.is_cur_keyword_("ignore"):
                statements.append(self.parse_ignore_())
            elif self.is_cur_keyword_("language"):
                statements.append(self.parse_language_())
            elif self.is_cur_keyword_("lookup"):
                statements.append(self.parse_lookup_(vertical))
            elif self.is_cur_keyword_("lookupflag"):
                statements.append(self.parse_lookupflag_())
            elif self.is_cur_keyword_("markClass"):
                statements.append(self.parse_markClass_())
            elif self.is_cur_keyword_({"pos", "position"}):
                statements.append(
                    self.parse_position_(enumerated=False, vertical=vertical))
            elif self.is_cur_keyword_("script"):
                statements.append(self.parse_script_())
            elif (self.is_cur_keyword_({"sub", "substitute",
                                        "rsub", "reversesub"})):
                statements.append(self.parse_substitute_())
            elif self.is_cur_keyword_("subtable"):
                statements.append(self.parse_subtable_())
            elif self.is_cur_keyword_("valueRecordDef"):
                statements.append(self.parse_valuerecord_definition_(vertical))
            else:
                raise FeatureLibError(
                    "Expected glyph class definition or statement",
                    self.cur_token_location_)

        self.expect_symbol_("}")
        for symtab in self.symbol_tables_:
            symtab.exit_scope()

        # The syntax repeats the block's name after the closing brace;
        # block.name is space-padded to 4 chars, hence the strip().
        name = self.expect_name_()
        if name != block.name.strip():
            raise FeatureLibError("Expected \"%s\"" % block.name.strip(),
                                  self.cur_token_location_)
        self.expect_symbol_(";")
|
|
|
|
|
2015-08-01 17:34:02 +02:00
|
|
|
def is_cur_keyword_(self, k):
|
2015-12-03 13:05:42 +01:00
|
|
|
if self.cur_token_type_ is Lexer.NAME:
|
|
|
|
if isinstance(k, type("")): # basestring is gone in Python3
|
|
|
|
return self.cur_token_ == k
|
|
|
|
else:
|
|
|
|
return self.cur_token_ in k
|
|
|
|
return False
|
2015-08-01 12:35:22 +02:00
|
|
|
|
2015-12-08 19:04:42 +01:00
|
|
|
def expect_class_name_(self):
|
2015-12-08 17:04:21 +01:00
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is not Lexer.GLYPHCLASS:
|
|
|
|
raise FeatureLibError("Expected @NAME", self.cur_token_location_)
|
|
|
|
return self.cur_token_
|
|
|
|
|
2015-12-08 19:04:42 +01:00
|
|
|
def expect_markClass_reference_(self):
|
|
|
|
name = self.expect_class_name_()
|
2015-12-12 12:54:23 +01:00
|
|
|
mc = self.glyphclasses_.resolve(name)
|
|
|
|
if mc is None:
|
2015-12-08 19:04:42 +01:00
|
|
|
raise FeatureLibError("Unknown markClass @%s" % name,
|
|
|
|
self.cur_token_location_)
|
2015-12-12 12:54:23 +01:00
|
|
|
if not isinstance(mc, ast.MarkClass):
|
|
|
|
raise FeatureLibError("@%s is not a markClass" % name,
|
|
|
|
self.cur_token_location_)
|
|
|
|
return mc
|
2015-12-08 19:04:42 +01:00
|
|
|
|
2015-08-01 12:35:22 +02:00
|
|
|
def expect_tag_(self):
|
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is not Lexer.NAME:
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Expected a tag", self.cur_token_location_)
|
2015-08-01 12:35:22 +02:00
|
|
|
if len(self.cur_token_) > 4:
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Tags can not be longer than 4 characters",
|
|
|
|
self.cur_token_location_)
|
2015-08-01 12:35:22 +02:00
|
|
|
return (self.cur_token_ + " ")[:4]
|
|
|
|
|
2015-09-08 09:26:24 +02:00
|
|
|
def expect_script_tag_(self):
|
|
|
|
tag = self.expect_tag_()
|
|
|
|
if tag == "dflt":
|
|
|
|
raise FeatureLibError(
|
|
|
|
'"dflt" is not a valid script tag; use "DFLT" instead',
|
|
|
|
self.cur_token_location_)
|
|
|
|
return tag
|
|
|
|
|
|
|
|
def expect_language_tag_(self):
|
|
|
|
tag = self.expect_tag_()
|
|
|
|
if tag == "DFLT":
|
|
|
|
raise FeatureLibError(
|
|
|
|
'"DFLT" is not a valid language tag; use "dflt" instead',
|
|
|
|
self.cur_token_location_)
|
|
|
|
return tag
|
|
|
|
|
2015-08-01 12:35:22 +02:00
|
|
|
def expect_symbol_(self, symbol):
|
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol:
|
|
|
|
return symbol
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Expected '%s'" % symbol,
|
|
|
|
self.cur_token_location_)
|
2015-08-01 12:35:22 +02:00
|
|
|
|
2015-08-05 10:41:04 +02:00
|
|
|
def expect_keyword_(self, keyword):
|
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
|
|
|
|
return self.cur_token_
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Expected \"%s\"" % keyword,
|
|
|
|
self.cur_token_location_)
|
2015-08-05 10:41:04 +02:00
|
|
|
|
2015-08-01 17:34:02 +02:00
|
|
|
def expect_name_(self):
|
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is Lexer.NAME:
|
|
|
|
return self.cur_token_
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Expected a name", self.cur_token_location_)
|
2015-08-01 17:34:02 +02:00
|
|
|
|
2015-08-04 11:01:04 +02:00
|
|
|
def expect_number_(self):
|
|
|
|
self.advance_lexer_()
|
|
|
|
if self.cur_token_type_ is Lexer.NUMBER:
|
|
|
|
return self.cur_token_
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Expected a number", self.cur_token_location_)
|
2015-08-04 11:01:04 +02:00
|
|
|
|
2015-08-01 12:35:22 +02:00
|
|
|
def advance_lexer_(self):
|
|
|
|
self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
|
|
|
|
self.next_token_type_, self.next_token_, self.next_token_location_)
|
|
|
|
try:
|
|
|
|
(self.next_token_type_, self.next_token_,
|
|
|
|
self.next_token_location_) = self.lexer_.next()
|
|
|
|
except StopIteration:
|
|
|
|
self.next_token_type_, self.next_token_ = (None, None)
|
2015-08-01 17:34:02 +02:00
|
|
|
|
2015-09-04 16:22:16 +02:00
|
|
|
@staticmethod
|
|
|
|
def reverse_string_(s):
|
2015-09-10 07:13:10 +02:00
|
|
|
"""'abc' --> 'cba'"""
|
2015-09-04 16:22:16 +02:00
|
|
|
return ''.join(reversed(list(s)))
|
|
|
|
|
2015-08-01 17:34:02 +02:00
|
|
|
def make_glyph_range_(self, location, start, limit):
|
|
|
|
"""("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}"""
|
|
|
|
result = set()
|
|
|
|
if len(start) != len(limit):
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError(
|
2015-08-01 17:34:02 +02:00
|
|
|
"Bad range: \"%s\" and \"%s\" should have the same length" %
|
|
|
|
(start, limit), location)
|
2015-09-04 16:22:16 +02:00
|
|
|
|
|
|
|
rev = self.reverse_string_
|
2015-08-01 17:34:02 +02:00
|
|
|
prefix = os.path.commonprefix([start, limit])
|
|
|
|
suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
|
|
|
|
if len(suffix) > 0:
|
|
|
|
start_range = start[len(prefix):-len(suffix)]
|
|
|
|
limit_range = limit[len(prefix):-len(suffix)]
|
|
|
|
else:
|
|
|
|
start_range = start[len(prefix):]
|
|
|
|
limit_range = limit[len(prefix):]
|
|
|
|
|
|
|
|
if start_range >= limit_range:
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError(
|
|
|
|
"Start of range must be smaller than its end",
|
|
|
|
location)
|
2015-08-01 17:34:02 +02:00
|
|
|
|
|
|
|
uppercase = re.compile(r'^[A-Z]$')
|
|
|
|
if uppercase.match(start_range) and uppercase.match(limit_range):
|
|
|
|
for c in range(ord(start_range), ord(limit_range) + 1):
|
|
|
|
result.add("%s%c%s" % (prefix, c, suffix))
|
|
|
|
return result
|
|
|
|
|
|
|
|
lowercase = re.compile(r'^[a-z]$')
|
|
|
|
if lowercase.match(start_range) and lowercase.match(limit_range):
|
|
|
|
for c in range(ord(start_range), ord(limit_range) + 1):
|
|
|
|
result.add("%s%c%s" % (prefix, c, suffix))
|
|
|
|
return result
|
|
|
|
|
|
|
|
digits = re.compile(r'^[0-9]{1,3}$')
|
|
|
|
if digits.match(start_range) and digits.match(limit_range):
|
|
|
|
for i in range(int(start_range, 10), int(limit_range, 10) + 1):
|
|
|
|
number = ("000" + str(i))[-len(start_range):]
|
|
|
|
result.add("%s%s%s" % (prefix, number, suffix))
|
|
|
|
return result
|
|
|
|
|
2015-08-21 17:09:46 +02:00
|
|
|
raise FeatureLibError("Bad range: \"%s-%s\"" % (start, limit),
|
|
|
|
location)
|
2015-08-03 09:37:27 +02:00
|
|
|
|
|
|
|
|
|
|
|
class SymbolTable(object):
    """A stack of nested name scopes.

    Definitions made in an inner scope shadow outer ones and disappear
    when the scope is exited.
    """

    def __init__(self):
        self.scopes_ = [{}]  # outermost (file-level) scope

    def enter_scope(self):
        """Push a fresh innermost scope."""
        self.scopes_.append({})

    def exit_scope(self):
        """Discard the innermost scope and everything defined in it."""
        self.scopes_.pop()

    def define(self, name, item):
        """Bind *name* to *item* in the innermost scope."""
        self.scopes_[-1][name] = item

    def resolve(self, name):
        """Look *name* up from the innermost scope outward.

        Returns None if no scope holds a (truthy) binding for it.
        """
        for scope in reversed(self.scopes_):
            # NOTE: a falsy binding is treated as absent, matching the
            # original truthiness test; in practice items are AST nodes.
            item = scope.get(name)
            if item:
                return item
        return None
|