[feaLib] Merged LexerError and ParserError
This simplifies the public API of the library. For clients, it does not matter which component detected an error. And we will soon have more components; there would be little point in declaring CompilerError, TableBuilderError, and so forth.
This commit is contained in:
parent
f38cc9f821
commit
9ddd313577
16
Lib/fontTools/feaLib/error.py
Normal file
16
Lib/fontTools/feaLib/error.py
Normal file
@ -0,0 +1,16 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
|
||||
class FeatureLibError(Exception):
    """Error raised by any component of fontTools.feaLib.

    Carries an optional ``location`` tuple of ``(path, line, column)``;
    when present, it is prepended to the message as ``path:line:column:``.
    """

    def __init__(self, message, location):
        Exception.__init__(self, message)
        # (path, line, column) of the offending source text, or None.
        self.location = location

    def __str__(self):
        text = Exception.__str__(self)
        if not self.location:
            # No source location known; show the bare message.
            return text
        # Format as "path:line:column: message".
        return "%s:%d:%d: %s" % (self.location + (text,))
|
18
Lib/fontTools/feaLib/error_test.py
Normal file
18
Lib/fontTools/feaLib/error_test.py
Normal file
@ -0,0 +1,18 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
import unittest
|
||||
|
||||
|
||||
class FeatureLibErrorTest(unittest.TestCase):
    """Unit tests for the string formatting of FeatureLibError."""

    def test_str(self):
        # A (path, line, column) location is rendered as a prefix.
        error = FeatureLibError("Squeak!", ("foo.fea", 23, 42))
        self.assertEqual(str(error), "foo.fea:23:42: Squeak!")

    def test_str_nolocation(self):
        # With no location, only the bare message is shown.
        error = FeatureLibError("Squeak!", None)
        self.assertEqual(str(error), "Squeak!")
|
||||
|
||||
|
||||
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
@ -1,23 +1,10 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
import codecs
|
||||
import os
|
||||
|
||||
|
||||
class LexerError(Exception):
|
||||
def __init__(self, message, location):
|
||||
Exception.__init__(self, message)
|
||||
self.location = location
|
||||
|
||||
def __str__(self):
|
||||
message = Exception.__str__(self)
|
||||
if self.location:
|
||||
path, line, column = self.location
|
||||
return "%s:%d:%d: %s" % (path, line, column, message)
|
||||
else:
|
||||
return message
|
||||
|
||||
|
||||
class Lexer(object):
|
||||
NUMBER = "NUMBER"
|
||||
STRING = "STRING"
|
||||
@ -90,11 +77,13 @@ class Lexer(object):
|
||||
|
||||
if self.mode_ is Lexer.MODE_FILENAME_:
|
||||
if cur_char != "(":
|
||||
raise LexerError("Expected '(' before file name", location)
|
||||
raise FeatureLibError("Expected '(' before file name",
|
||||
location)
|
||||
self.scan_until_(")")
|
||||
cur_char = text[self.pos_] if self.pos_ < limit else None
|
||||
if cur_char != ")":
|
||||
raise LexerError("Expected ')' after file name", location)
|
||||
raise FeatureLibError("Expected ')' after file name",
|
||||
location)
|
||||
self.pos_ += 1
|
||||
self.mode_ = Lexer.MODE_NORMAL_
|
||||
return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)
|
||||
@ -108,9 +97,9 @@ class Lexer(object):
|
||||
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
|
||||
glyphclass = text[start + 1:self.pos_]
|
||||
if len(glyphclass) < 1:
|
||||
raise LexerError("Expected glyph class name", location)
|
||||
raise FeatureLibError("Expected glyph class name", location)
|
||||
if len(glyphclass) > 30:
|
||||
raise LexerError(
|
||||
raise FeatureLibError(
|
||||
"Glyph class names must not be longer than 30 characters",
|
||||
location)
|
||||
return (Lexer.GLYPHCLASS, glyphclass, location)
|
||||
@ -142,8 +131,10 @@ class Lexer(object):
|
||||
self.pos_ += 1
|
||||
return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
|
||||
else:
|
||||
raise LexerError("Expected '\"' to terminate string", location)
|
||||
raise LexerError("Unexpected character: '%s'" % cur_char, location)
|
||||
raise FeatureLibError("Expected '\"' to terminate string",
|
||||
location)
|
||||
raise FeatureLibError("Unexpected character: '%s'" % cur_char,
|
||||
location)
|
||||
|
||||
def scan_over_(self, valid):
|
||||
p = self.pos_
|
||||
@ -179,15 +170,15 @@ class IncludingLexer(object):
|
||||
if token_type is Lexer.NAME and token == "include":
|
||||
fname_type, fname_token, fname_location = lexer.next()
|
||||
if fname_type is not Lexer.FILENAME:
|
||||
raise LexerError("Expected file name", fname_location)
|
||||
raise FeatureLibError("Expected file name", fname_location)
|
||||
semi_type, semi_token, semi_location = lexer.next()
|
||||
if semi_type is not Lexer.SYMBOL or semi_token != ";":
|
||||
raise LexerError("Expected ';'", semi_location)
|
||||
raise FeatureLibError("Expected ';'", semi_location)
|
||||
curpath, _ = os.path.split(lexer.filename_)
|
||||
path = os.path.join(curpath, fname_token)
|
||||
if len(self.lexers_) >= 5:
|
||||
raise LexerError("Too many recursive includes",
|
||||
fname_location)
|
||||
raise FeatureLibError("Too many recursive includes",
|
||||
fname_location)
|
||||
self.lexers_.append(self.make_lexer_(path, fname_location))
|
||||
continue
|
||||
else:
|
||||
@ -200,4 +191,4 @@ class IncludingLexer(object):
|
||||
with codecs.open(filename, "rb", "utf-8") as f:
|
||||
return Lexer(f.read(), filename)
|
||||
except IOError as err:
|
||||
raise LexerError(str(err), location)
|
||||
raise FeatureLibError(str(err), location)
|
||||
|
@ -1,6 +1,7 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
from fontTools.feaLib.lexer import IncludingLexer, Lexer
|
||||
import os
|
||||
import unittest
|
||||
|
||||
@ -9,16 +10,6 @@ def lex(s):
|
||||
return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")]
|
||||
|
||||
|
||||
class LexerErrorTest(unittest.TestCase):
|
||||
def test_str(self):
|
||||
err = LexerError("Squeak!", ("foo.fea", 23, 42))
|
||||
self.assertEqual(str(err), "foo.fea:23:42: Squeak!")
|
||||
|
||||
def test_str_nolocation(self):
|
||||
err = LexerError("Squeak!", None)
|
||||
self.assertEqual(str(err), "Squeak!")
|
||||
|
||||
|
||||
class LexerTest(unittest.TestCase):
|
||||
def __init__(self, methodName):
|
||||
unittest.TestCase.__init__(self, methodName)
|
||||
@ -43,9 +34,12 @@ class LexerTest(unittest.TestCase):
|
||||
|
||||
def test_glyphclass(self):
|
||||
self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")])
|
||||
self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)")
|
||||
self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A")
|
||||
self.assertRaisesRegex(LexerError, "not be longer than 30 characters",
|
||||
self.assertRaisesRegex(FeatureLibError,
|
||||
"Expected glyph class", lex, "@(a)")
|
||||
self.assertRaisesRegex(FeatureLibError,
|
||||
"Expected glyph class", lex, "@ A")
|
||||
self.assertRaisesRegex(FeatureLibError,
|
||||
"not be longer than 30 characters",
|
||||
lex, "@a123456789.a123456789.a123456789.x")
|
||||
|
||||
def test_include(self):
|
||||
@ -59,8 +53,8 @@ class LexerTest(unittest.TestCase):
|
||||
(Lexer.FILENAME, "foo"),
|
||||
(Lexer.SYMBOL, ";")
|
||||
])
|
||||
self.assertRaises(LexerError, lex, "include blah")
|
||||
self.assertRaises(LexerError, lex, "include (blah")
|
||||
self.assertRaises(FeatureLibError, lex, "include blah")
|
||||
self.assertRaises(FeatureLibError, lex, "include (blah")
|
||||
|
||||
def test_number(self):
|
||||
self.assertEqual(lex("123 -456"),
|
||||
@ -80,10 +74,10 @@ class LexerTest(unittest.TestCase):
|
||||
def test_string(self):
|
||||
self.assertEqual(lex('"foo" "bar"'),
|
||||
[(Lexer.STRING, "foo"), (Lexer.STRING, "bar")])
|
||||
self.assertRaises(LexerError, lambda: lex('"foo\n bar"'))
|
||||
self.assertRaises(FeatureLibError, lambda: lex('"foo\n bar"'))
|
||||
|
||||
def test_bad_character(self):
|
||||
self.assertRaises(LexerError, lambda: lex("123 \u0001"))
|
||||
self.assertRaises(FeatureLibError, lambda: lex("123 \u0001"))
|
||||
|
||||
def test_newline(self):
|
||||
lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")]
|
||||
@ -145,15 +139,15 @@ class IncludingLexerTest(unittest.TestCase):
|
||||
|
||||
def test_include_limit(self):
|
||||
lexer = IncludingLexer(self.getpath("include6.fea"))
|
||||
self.assertRaises(LexerError, lambda: list(lexer))
|
||||
self.assertRaises(FeatureLibError, lambda: list(lexer))
|
||||
|
||||
def test_include_self(self):
|
||||
lexer = IncludingLexer(self.getpath("includeself.fea"))
|
||||
self.assertRaises(LexerError, lambda: list(lexer))
|
||||
self.assertRaises(FeatureLibError, lambda: list(lexer))
|
||||
|
||||
def test_include_missing_file(self):
|
||||
lexer = IncludingLexer(self.getpath("includemissingfile.fea"))
|
||||
self.assertRaises(LexerError, lambda: list(lexer))
|
||||
self.assertRaises(FeatureLibError, lambda: list(lexer))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -1,25 +1,12 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
from fontTools.feaLib.lexer import Lexer, IncludingLexer
|
||||
import fontTools.feaLib.ast as ast
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
class ParserError(Exception):
|
||||
def __init__(self, message, location):
|
||||
Exception.__init__(self, message)
|
||||
self.location = location
|
||||
|
||||
def __str__(self):
|
||||
message = Exception.__str__(self)
|
||||
if self.location:
|
||||
path, line, column = self.location
|
||||
return "%s:%d:%d: %s" % (path, line, column, message)
|
||||
else:
|
||||
return message
|
||||
|
||||
|
||||
class Parser(object):
|
||||
def __init__(self, path):
|
||||
self.doc_ = ast.FeatureFile()
|
||||
@ -54,9 +41,9 @@ class Parser(object):
|
||||
statements.append(
|
||||
self.parse_valuerecord_definition_(vertical=False))
|
||||
else:
|
||||
raise ParserError("Expected feature, languagesystem, "
|
||||
"lookup, or glyph class definition",
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Expected feature, languagesystem, "
|
||||
"lookup, or glyph class definition",
|
||||
self.cur_token_location_)
|
||||
return self.doc_
|
||||
|
||||
def parse_anchordef_(self):
|
||||
@ -79,8 +66,8 @@ class Parser(object):
|
||||
glyphs = self.parse_glyphclass_(accept_glyphname=False)
|
||||
self.expect_symbol_(";")
|
||||
if self.glyphclasses_.resolve(name) is not None:
|
||||
raise ParserError("Glyph class @%s already defined" % name,
|
||||
location)
|
||||
raise FeatureLibError("Glyph class @%s already defined" % name,
|
||||
location)
|
||||
glyphclass = ast.GlyphClassDefinition(location, name, glyphs)
|
||||
self.glyphclasses_.define(name, glyphclass)
|
||||
return glyphclass
|
||||
@ -94,8 +81,9 @@ class Parser(object):
|
||||
self.advance_lexer_()
|
||||
gc = self.glyphclasses_.resolve(self.cur_token_)
|
||||
if gc is None:
|
||||
raise ParserError("Unknown glyph class @%s" % self.cur_token_,
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError(
|
||||
"Unknown glyph class @%s" % self.cur_token_,
|
||||
self.cur_token_location_)
|
||||
result.update(gc.glyphs)
|
||||
return result
|
||||
|
||||
@ -116,12 +104,12 @@ class Parser(object):
|
||||
elif self.cur_token_type_ is Lexer.GLYPHCLASS:
|
||||
gc = self.glyphclasses_.resolve(self.cur_token_)
|
||||
if gc is None:
|
||||
raise ParserError(
|
||||
raise FeatureLibError(
|
||||
"Unknown glyph class @%s" % self.cur_token_,
|
||||
self.cur_token_location_)
|
||||
result.update(gc.glyphs)
|
||||
else:
|
||||
raise ParserError(
|
||||
raise FeatureLibError(
|
||||
"Expected glyph name, glyph range, "
|
||||
"or glyph class reference",
|
||||
self.cur_token_location_)
|
||||
@ -147,13 +135,15 @@ class Parser(object):
|
||||
if self.next_token_ == "lookup":
|
||||
self.expect_keyword_("lookup")
|
||||
if not marked:
|
||||
raise ParserError("Lookups can only follow marked glyphs",
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError(
|
||||
"Lookups can only follow marked glyphs",
|
||||
self.cur_token_location_)
|
||||
lookup_name = self.expect_name_()
|
||||
lookup = self.lookups_.resolve(lookup_name)
|
||||
if lookup is None:
|
||||
raise ParserError('Unknown lookup "%s"' % lookup_name,
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError(
|
||||
'Unknown lookup "%s"' % lookup_name,
|
||||
self.cur_token_location_)
|
||||
if marked:
|
||||
lookups.append(lookup)
|
||||
|
||||
@ -171,7 +161,8 @@ class Parser(object):
|
||||
prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_()
|
||||
self.expect_symbol_(";")
|
||||
return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix)
|
||||
raise ParserError("Expected \"substitute\"", self.next_token_location_)
|
||||
raise FeatureLibError(
|
||||
"Expected \"substitute\"", self.next_token_location_)
|
||||
|
||||
def parse_language_(self):
|
||||
assert self.is_cur_keyword_("language")
|
||||
@ -193,8 +184,8 @@ class Parser(object):
|
||||
if self.next_token_ == ";":
|
||||
lookup = self.lookups_.resolve(name)
|
||||
if lookup is None:
|
||||
raise ParserError("Unknown lookup \"%s\"" % name,
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Unknown lookup \"%s\"" % name,
|
||||
self.cur_token_location_)
|
||||
self.expect_symbol_(";")
|
||||
return ast.LookupReferenceStatement(location, lookup)
|
||||
|
||||
@ -231,17 +222,19 @@ class Parser(object):
|
||||
keyword = None
|
||||
self.expect_symbol_(";")
|
||||
if len(new) is 0 and not any(lookups):
|
||||
raise ParserError(
|
||||
raise FeatureLibError(
|
||||
'Expected "by", "from" or explicit lookup references',
|
||||
self.cur_token_location_)
|
||||
|
||||
if keyword == "from":
|
||||
if len(old) != 1 or len(old[0]) != 1:
|
||||
raise ParserError('Expected a single glyph before "from"',
|
||||
location)
|
||||
raise FeatureLibError(
|
||||
'Expected a single glyph before "from"',
|
||||
location)
|
||||
if len(new) != 1:
|
||||
raise ParserError('Expected a single glyphclass after "from"',
|
||||
location)
|
||||
raise FeatureLibError(
|
||||
'Expected a single glyphclass after "from"',
|
||||
location)
|
||||
return ast.AlternateSubstitution(location, list(old[0])[0], new[0])
|
||||
|
||||
rule = ast.SubstitutionRule(location, old, new)
|
||||
@ -269,8 +262,8 @@ class Parser(object):
|
||||
name = self.expect_name_()
|
||||
vrd = self.valuerecords_.resolve(name)
|
||||
if vrd is None:
|
||||
raise ParserError("Unknown valueRecordDef \"%s\"" % name,
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Unknown valueRecordDef \"%s\"" % name,
|
||||
self.cur_token_location_)
|
||||
value = vrd.value
|
||||
xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
|
||||
xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
|
||||
@ -342,7 +335,7 @@ class Parser(object):
|
||||
elif self.is_cur_keyword_("valueRecordDef"):
|
||||
statements.append(self.parse_valuerecord_definition_(vertical))
|
||||
else:
|
||||
raise ParserError(
|
||||
raise FeatureLibError(
|
||||
"Expected glyph class definition or statement",
|
||||
self.cur_token_location_)
|
||||
|
||||
@ -352,8 +345,8 @@ class Parser(object):
|
||||
|
||||
name = self.expect_name_()
|
||||
if name != block.name.strip():
|
||||
raise ParserError("Expected \"%s\"" % block.name.strip(),
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Expected \"%s\"" % block.name.strip(),
|
||||
self.cur_token_location_)
|
||||
self.expect_symbol_(";")
|
||||
|
||||
def is_cur_keyword_(self, k):
|
||||
@ -362,36 +355,37 @@ class Parser(object):
|
||||
def expect_tag_(self):
|
||||
self.advance_lexer_()
|
||||
if self.cur_token_type_ is not Lexer.NAME:
|
||||
raise ParserError("Expected a tag", self.cur_token_location_)
|
||||
raise FeatureLibError("Expected a tag", self.cur_token_location_)
|
||||
if len(self.cur_token_) > 4:
|
||||
raise ParserError("Tags can not be longer than 4 characters",
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Tags can not be longer than 4 characters",
|
||||
self.cur_token_location_)
|
||||
return (self.cur_token_ + " ")[:4]
|
||||
|
||||
def expect_symbol_(self, symbol):
|
||||
self.advance_lexer_()
|
||||
if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol:
|
||||
return symbol
|
||||
raise ParserError("Expected '%s'" % symbol, self.cur_token_location_)
|
||||
raise FeatureLibError("Expected '%s'" % symbol,
|
||||
self.cur_token_location_)
|
||||
|
||||
def expect_keyword_(self, keyword):
|
||||
self.advance_lexer_()
|
||||
if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
|
||||
return self.cur_token_
|
||||
raise ParserError("Expected \"%s\"" % keyword,
|
||||
self.cur_token_location_)
|
||||
raise FeatureLibError("Expected \"%s\"" % keyword,
|
||||
self.cur_token_location_)
|
||||
|
||||
def expect_name_(self):
|
||||
self.advance_lexer_()
|
||||
if self.cur_token_type_ is Lexer.NAME:
|
||||
return self.cur_token_
|
||||
raise ParserError("Expected a name", self.cur_token_location_)
|
||||
raise FeatureLibError("Expected a name", self.cur_token_location_)
|
||||
|
||||
def expect_number_(self):
|
||||
self.advance_lexer_()
|
||||
if self.cur_token_type_ is Lexer.NUMBER:
|
||||
return self.cur_token_
|
||||
raise ParserError("Expected a number", self.cur_token_location_)
|
||||
raise FeatureLibError("Expected a number", self.cur_token_location_)
|
||||
|
||||
def advance_lexer_(self):
|
||||
self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
|
||||
@ -406,7 +400,7 @@ class Parser(object):
|
||||
"""("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}"""
|
||||
result = set()
|
||||
if len(start) != len(limit):
|
||||
raise ParserError(
|
||||
raise FeatureLibError(
|
||||
"Bad range: \"%s\" and \"%s\" should have the same length" %
|
||||
(start, limit), location)
|
||||
rev = lambda s: ''.join(reversed(list(s))) # string reversal
|
||||
@ -420,8 +414,9 @@ class Parser(object):
|
||||
limit_range = limit[len(prefix):]
|
||||
|
||||
if start_range >= limit_range:
|
||||
raise ParserError("Start of range must be smaller than its end",
|
||||
location)
|
||||
raise FeatureLibError(
|
||||
"Start of range must be smaller than its end",
|
||||
location)
|
||||
|
||||
uppercase = re.compile(r'^[A-Z]$')
|
||||
if uppercase.match(start_range) and uppercase.match(limit_range):
|
||||
@ -442,7 +437,8 @@ class Parser(object):
|
||||
result.add("%s%s%s" % (prefix, number, suffix))
|
||||
return result
|
||||
|
||||
raise ParserError("Bad range: \"%s-%s\"" % (start, limit), location)
|
||||
raise FeatureLibError("Bad range: \"%s-%s\"" % (start, limit),
|
||||
location)
|
||||
|
||||
|
||||
class SymbolTable(object):
|
||||
|
@ -1,7 +1,7 @@
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from __future__ import unicode_literals
|
||||
from fontTools.feaLib.lexer import LexerError
|
||||
from fontTools.feaLib.parser import Parser, ParserError, SymbolTable
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
from fontTools.feaLib.parser import Parser, SymbolTable
|
||||
from fontTools.misc.py23 import *
|
||||
import fontTools.feaLib.ast as ast
|
||||
import codecs
|
||||
@ -53,13 +53,13 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_glyphclass_bad(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError,
|
||||
FeatureLibError,
|
||||
"Expected glyph name, glyph range, or glyph class reference",
|
||||
self.parse, "@bad = [a 123];")
|
||||
|
||||
def test_glyphclass_duplicate(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Glyph class @dup already defined",
|
||||
FeatureLibError, "Glyph class @dup already defined",
|
||||
self.parse, "@dup = [a b]; @dup = [x];")
|
||||
|
||||
def test_glyphclass_empty(self):
|
||||
@ -96,17 +96,17 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_glyphclass_range_bad(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError,
|
||||
FeatureLibError,
|
||||
"Bad range: \"a\" and \"foobar\" should have the same length",
|
||||
self.parse, "@bad = [a-foobar];")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Bad range: \"A.swash-z.swash\"",
|
||||
FeatureLibError, "Bad range: \"A.swash-z.swash\"",
|
||||
self.parse, "@bad = [A.swash-z.swash];")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Start of range must be smaller than its end",
|
||||
FeatureLibError, "Start of range must be smaller than its end",
|
||||
self.parse, "@bad = [B.swash-A.swash];")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Bad range: \"foo.1234-foo.9876\"",
|
||||
FeatureLibError, "Bad range: \"foo.1234-foo.9876\"",
|
||||
self.parse, "@bad = [foo.1234-foo.9876];")
|
||||
|
||||
def test_glyphclass_range_mixed(self):
|
||||
@ -123,7 +123,7 @@ class ParserTest(unittest.TestCase):
|
||||
self.assertEqual(vowels_uc.glyphs, set(list("AEIOU")))
|
||||
self.assertEqual(vowels.glyphs, set(list("aeiouyAEIOUY")))
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Unknown glyph class @unknown",
|
||||
FeatureLibError, "Unknown glyph class @unknown",
|
||||
self.parse, "@bad = [@unknown];")
|
||||
|
||||
def test_glyphclass_scoping(self):
|
||||
@ -211,7 +211,7 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_lookup_block_name_mismatch(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError, 'Expected "Foo"',
|
||||
FeatureLibError, 'Expected "Foo"',
|
||||
self.parse, "lookup Foo {} Bar;")
|
||||
|
||||
def test_lookup_block_with_horizontal_valueRecordDef(self):
|
||||
@ -247,7 +247,7 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_lookup_reference_unknown(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError, 'Unknown lookup "Huh"',
|
||||
FeatureLibError, 'Unknown lookup "Huh"',
|
||||
self.parse, "feature liga {lookup Huh;} liga;")
|
||||
|
||||
def test_script(self):
|
||||
@ -335,7 +335,8 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_substitute_missing_by(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError, 'Expected "by", "from" or explicit lookup references',
|
||||
FeatureLibError,
|
||||
'Expected "by", "from" or explicit lookup references',
|
||||
self.parse, "feature liga {substitute f f i;} liga;")
|
||||
|
||||
def test_subtable(self):
|
||||
@ -378,7 +379,7 @@ class ParserTest(unittest.TestCase):
|
||||
|
||||
def test_valuerecord_named_unknown(self):
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Unknown valueRecordDef \"unknown\"",
|
||||
FeatureLibError, "Unknown valueRecordDef \"unknown\"",
|
||||
self.parse, "valueRecordDef <unknown> foo;")
|
||||
|
||||
def test_valuerecord_scoping(self):
|
||||
@ -396,13 +397,13 @@ class ParserTest(unittest.TestCase):
|
||||
self.assertEqual(langsys.script, "latn")
|
||||
self.assertEqual(langsys.language, "DEU ")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "Expected ';'",
|
||||
FeatureLibError, "Expected ';'",
|
||||
self.parse, "languagesystem latn DEU")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "longer than 4 characters",
|
||||
FeatureLibError, "longer than 4 characters",
|
||||
self.parse, "languagesystem foobar DEU")
|
||||
self.assertRaisesRegex(
|
||||
ParserError, "longer than 4 characters",
|
||||
FeatureLibError, "longer than 4 characters",
|
||||
self.parse, "languagesystem latn FOOBAR")
|
||||
|
||||
def setUp(self):
|
||||
|
Loading…
x
Reference in New Issue
Block a user