Merge pull request #161 from anthrotype/optional-lxml
WIP: make lxml (and singledispatch) optional
This commit is contained in:
commit
25c1060059
5
.codecov.yml
Normal file
5
.codecov.yml
Normal file
@ -0,0 +1,5 @@
|
||||
comment: false
|
||||
coverage:
|
||||
status:
|
||||
project: off
|
||||
patch: off
|
35
.coveragerc
Normal file
35
.coveragerc
Normal file
@ -0,0 +1,35 @@
|
||||
[run]
|
||||
# measure 'branch' coverage in addition to 'statement' coverage
|
||||
# See: http://coverage.readthedocs.io/en/coverage-4.5.1/branch.html
|
||||
branch = True
|
||||
|
||||
# list of directories or packages to measure
|
||||
source = ufoLib
|
||||
|
||||
# these are treated as equivalent when combining data
|
||||
[paths]
|
||||
source =
|
||||
Lib/ufoLib
|
||||
.tox/*/lib/python*/site-packages/ufoLib
|
||||
.tox/*/Lib/site-packages/ufoLib
|
||||
.tox/pypy*/site-packages/ufoLib
|
||||
|
||||
[report]
|
||||
# Regexes for lines to exclude from consideration
|
||||
exclude_lines =
|
||||
# keywords to use in inline comments to skip coverage
|
||||
pragma: no cover
|
||||
|
||||
# don't complain if tests don't hit defensive assertion code
|
||||
raise AssertionError
|
||||
raise NotImplementedError
|
||||
|
||||
# don't complain if non-runnable code isn't run
|
||||
if 0:
|
||||
if __name__ == .__main__.:
|
||||
|
||||
# Don't include files that are 100% covered
|
||||
skip_covered = True
|
||||
|
||||
# ignore source code that can’t be found
|
||||
ignore_errors = True
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -10,3 +10,4 @@ dist/
|
||||
.eggs/
|
||||
.tox/
|
||||
/.pytest_cache
|
||||
htmlcov/
|
||||
|
23
.travis.yml
23
.travis.yml
@ -1,16 +1,23 @@
|
||||
language: python
|
||||
sudo: false
|
||||
python:
|
||||
- "2.7"
|
||||
- "3.5"
|
||||
- "3.6"
|
||||
matrix:
|
||||
include:
|
||||
- env: TOXENV=py27-cov,py27-cov-lxml
|
||||
python: 2.7
|
||||
- env: TOXENV=py36-cov,py36-cov-lxml
|
||||
python: 3.6
|
||||
- env: TOXENV=py37-cov,py37-cov-lxml
|
||||
python: 3.7
|
||||
dist: xenial
|
||||
sudo: true
|
||||
install:
|
||||
- pip install --upgrade pip setuptools wheel
|
||||
- pip install tox-travis
|
||||
- pip install tox
|
||||
script: tox
|
||||
after_success:
|
||||
# upload code coverage to Codecov.io
|
||||
- tox -e codecov
|
||||
before_deploy:
|
||||
- pip wheel --no-deps -w dist .
|
||||
- python setup.py sdist
|
||||
- tox -e wheel
|
||||
- export WHL=$(ls dist/ufoLib*.whl)
|
||||
- export ZIP=$(ls dist/ufoLib*.zip)
|
||||
deploy:
|
||||
|
@ -1,8 +1,9 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
from io import StringIO, BytesIO, open
|
||||
from copy import deepcopy
|
||||
from fontTools.misc.py23 import basestring, unicode
|
||||
from fontTools.misc.py23 import basestring, unicode, tounicode
|
||||
from ufoLib.glifLib import GlyphSet
|
||||
from ufoLib.validators import *
|
||||
from ufoLib.filenames import userNameToFileName
|
||||
@ -1108,6 +1109,8 @@ class UFOWriter(object):
|
||||
for layerName in layerOrder:
|
||||
if layerName is None:
|
||||
layerName = DEFAULT_LAYER_NAME
|
||||
else:
|
||||
layerName = tounicode(layerName)
|
||||
newOrder.append(layerName)
|
||||
layerOrder = newOrder
|
||||
else:
|
||||
@ -1381,9 +1384,9 @@ def writeDataFileAtomically(data, path):
|
||||
f.close()
|
||||
if data == oldData:
|
||||
return
|
||||
# if the data is empty, remove the existing file
|
||||
if not data:
|
||||
os.remove(path)
|
||||
# if the data is empty, remove the existing file
|
||||
if not data:
|
||||
os.remove(path)
|
||||
if data:
|
||||
f = open(path, "wb")
|
||||
f.write(data)
|
||||
|
@ -2,6 +2,9 @@
|
||||
Conversion functions.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
|
||||
# adapted from the UFO spec
|
||||
|
||||
def convertUFO1OrUFO2KerningToUFO3Kerning(kerning, groups):
|
||||
|
485
Lib/ufoLib/etree.py
Normal file
485
Lib/ufoLib/etree.py
Normal file
@ -0,0 +1,485 @@
|
||||
"""Shim module exporting the same ElementTree API for lxml and
|
||||
xml.etree backends.
|
||||
|
||||
When lxml is installed, it is automatically preferred over the built-in
|
||||
xml.etree module.
|
||||
On Python 2.7, the cElementTree module is preferred over the pure-python
|
||||
ElementTree module.
|
||||
|
||||
Besides exporting a unified interface, this also defines extra functions
|
||||
or subclasses built-in ElementTree classes to add features that are
|
||||
only available in lxml, like OrderedDict for attributes, pretty_print and
|
||||
iterwalk.
|
||||
"""
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from fontTools.misc.py23 import basestring, unicode, tounicode, open
|
||||
|
||||
# we use a custom XML declaration for backward compatibility with older
|
||||
# ufoLib versions which would write it using double quotes.
|
||||
# https://github.com/unified-font-object/ufoLib/issues/158
|
||||
XML_DECLARATION = """<?xml version="1.0" encoding="%s"?>"""
|
||||
|
||||
__all__ = [
|
||||
# public symbols
|
||||
"Comment",
|
||||
"dump",
|
||||
"Element",
|
||||
"ElementTree",
|
||||
"fromstring",
|
||||
"fromstringlist",
|
||||
"iselement",
|
||||
"iterparse",
|
||||
"parse",
|
||||
"ParseError",
|
||||
"PI",
|
||||
"ProcessingInstruction",
|
||||
"QName",
|
||||
"SubElement",
|
||||
"tostring",
|
||||
"tostringlist",
|
||||
"TreeBuilder",
|
||||
"XML",
|
||||
"XMLParser",
|
||||
"XMLTreeBuilder",
|
||||
"register_namespace",
|
||||
]
|
||||
|
||||
try:
|
||||
from lxml.etree import *
|
||||
|
||||
_have_lxml = True
|
||||
except ImportError:
|
||||
try:
|
||||
from xml.etree.cElementTree import *
|
||||
|
||||
# the cElementTree version of XML function doesn't support
|
||||
# the optional 'parser' keyword argument
|
||||
from xml.etree.ElementTree import XML
|
||||
except ImportError: # pragma: no cover
|
||||
from xml.etree.ElementTree import *
|
||||
_have_lxml = False
|
||||
|
||||
import sys
|
||||
|
||||
# dict is always ordered in python >= 3.6 and on pypy
|
||||
PY36 = sys.version_info >= (3, 6)
|
||||
try:
|
||||
import __pypy__
|
||||
except ImportError:
|
||||
__pypy__ = None
|
||||
_dict_is_ordered = bool(PY36 or __pypy__)
|
||||
del PY36, __pypy__
|
||||
|
||||
if _dict_is_ordered:
|
||||
_Attrib = dict
|
||||
else:
|
||||
from collections import OrderedDict as _Attrib
|
||||
|
||||
if isinstance(Element, type):
|
||||
_Element = Element
|
||||
else:
|
||||
# in py27, cElementTree.Element cannot be subclassed, so
|
||||
# we need to import the pure-python class
|
||||
from xml.etree.ElementTree import Element as _Element
|
||||
|
||||
class Element(_Element):
    """Element subclass that keeps the order of attributes.

    Attributes are stored in an ``_Attrib`` mapping (a plain dict on
    interpreters where dicts are ordered, OrderedDict otherwise), so
    serialization preserves insertion order like lxml does.
    """

    def __init__(self, tag, attrib=_Attrib(), **extra):
        # NOTE: the shared default _Attrib() is safe because it is only
        # ever read (update'd *from*), never mutated.
        super(Element, self).__init__(tag)
        merged = _Attrib()
        for mapping in (attrib, extra):
            if mapping:
                merged.update(mapping)
        self.attrib = merged
|
||||
|
||||
def SubElement(parent, tag, attrib=_Attrib(), **extra):
    """Create a child element of *parent* and return it.

    Must override SubElement as well otherwise _elementtree.SubElement
    fails if 'parent' is a subclass of Element object.
    """
    # instantiate through the parent's own class so subclasses of
    # Element produce children of the same (ordered-attrib) type
    factory = parent.__class__
    child = factory(tag, attrib, **extra)
    parent.append(child)
    return child
|
||||
|
||||
def _iterwalk(element, events, tag):
    """Recursively yield (event, element) pairs for *element*'s subtree."""
    emit = tag is None or element.tag == tag
    if emit and "start" in events:
        yield ("start", element)
    for child in element:
        # walk each subtree before closing this element
        for pair in _iterwalk(child, events, tag):
            yield pair
    if emit:
        # NOTE: an "end" event is produced for every matching element,
        # regardless of whether "end" appears in *events*.
        yield ("end", element)


def iterwalk(element_or_tree, events=("end",), tag=None):
    """A tree walker that generates events from an existing tree as
    if it was parsing XML data with iterparse().
    Drop-in replacement for lxml.etree.iterwalk.
    """
    root = (
        element_or_tree
        if iselement(element_or_tree)
        else element_or_tree.getroot()
    )
    # lxml treats the "*" wildcard tag the same as no tag filter
    for pair in _iterwalk(root, events, None if tag == "*" else tag):
        yield pair
|
||||
|
||||
# keep a reference to the backend's ElementTree before shadowing the name
_ElementTree = ElementTree


class ElementTree(_ElementTree):
    """ElementTree subclass that adds 'pretty_print' and 'doctype'
    arguments to the 'write' method.
    Currently these are only supported for the default XML serialization
    'method', and not also for "html" or "text", for these are delegated
    to the base class.
    """

    def write(
        self,
        file_or_filename,
        encoding=None,
        xml_declaration=False,
        method=None,
        doctype=None,
        pretty_print=False,
    ):
        # non-XML methods get none of the extra features; hand off as-is
        if method and method != "xml":
            # delegate to super-class
            super(ElementTree, self).write(
                file_or_filename,
                encoding=encoding,
                xml_declaration=xml_declaration,
                method=method,
            )
            return

        # "unicode" (the type or the string) selects text serialization,
        # which is incompatible with emitting an XML declaration
        if encoding is unicode or (
            encoding is not None and encoding.lower() == "unicode"
        ):
            if xml_declaration:
                raise ValueError(
                    "Serialisation to unicode must not request an XML declaration"
                )
            write_declaration = False
            encoding = "unicode"
        elif xml_declaration is None:
            # by default, write an XML declaration only for non-standard encodings
            write_declaration = encoding is not None and encoding.upper() not in (
                "ASCII",
                "UTF-8",
                "UTF8",
                "US-ASCII",
            )
        else:
            write_declaration = xml_declaration

        if encoding is None:
            encoding = "ASCII"

        if pretty_print:
            # NOTE this will modify the tree in-place
            _indent(self._root)

        with _get_writer(file_or_filename, encoding) as write:
            if write_declaration:
                write(XML_DECLARATION % encoding.upper())
                if pretty_print:
                    write("\n")
            if doctype:
                write(_tounicode(doctype))
                if pretty_print:
                    write("\n")

            qnames, namespaces = _namespaces(self._root)
            _serialize_xml(write, self._root, qnames, namespaces)
|
||||
|
||||
import io
|
||||
|
||||
def tostring(
    element,
    encoding=None,
    xml_declaration=None,
    method=None,
    doctype=None,
    pretty_print=False,
):
    """Custom 'tostring' function that uses our ElementTree subclass, with
    pretty_print support.
    """
    # "unicode" encoding yields str output, anything else yields bytes
    if encoding == "unicode":
        stream = io.StringIO()
    else:
        stream = io.BytesIO()
    tree = ElementTree(element)
    tree.write(
        stream,
        encoding=encoding,
        xml_declaration=xml_declaration,
        method=method,
        doctype=doctype,
        pretty_print=pretty_print,
    )
    return stream.getvalue()
|
||||
|
||||
# serialization support
|
||||
|
||||
import re
|
||||
|
||||
# Valid XML strings can include any Unicode character, excluding control
|
||||
# characters, the surrogate blocks, FFFE, and FFFF:
|
||||
# Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
|
||||
# Here we reversed the pattern to match only the invalid characters.
|
||||
# For the 'narrow' python builds supporting only UCS-2, which represent
|
||||
# characters beyond BMP as UTF-16 surrogate pairs, we need to pass through
|
||||
# the surrogate block. I haven't found a more elegant solution...
|
||||
UCS2 = sys.maxunicode < 0x10FFFF
|
||||
if UCS2:
|
||||
_invalid_xml_string = re.compile(
|
||||
"[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]"
|
||||
)
|
||||
else:
|
||||
_invalid_xml_string = re.compile(
|
||||
"[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uD800-\uDFFF\uFFFE-\uFFFF]"
|
||||
)
|
||||
|
||||
def _tounicode(s):
    """Test if a string is valid user input and decode it to unicode string
    using ASCII encoding if it's a bytes string.
    Reject all bytes/unicode input that contains non-XML characters.
    Reject all bytes input that contains non-ASCII characters.

    Raises ValueError for non-ASCII bytes or control characters, and
    TypeError (via _raise_serialization_error) for non-string values.
    """
    try:
        s = tounicode(s, encoding="ascii", errors="strict")
    except UnicodeDecodeError:
        raise ValueError(
            "Bytes strings can only contain ASCII characters. "
            "Use unicode strings for non-ASCII characters.")
    except AttributeError:
        # not a str/bytes value at all; this helper always raises
        _raise_serialization_error(s)
    # reject characters that are illegal in XML (NULs, most C0 controls, ...)
    if s and _invalid_xml_string.search(s):
        raise ValueError(
            "All strings must be XML compatible: Unicode or ASCII, "
            "no NULL bytes or control characters"
        )
    return s
|
||||
|
||||
import contextlib
|
||||
|
||||
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    """Yield a text ``write`` callable for *file_or_filename*.

    Returns text write method and release all resources after using.
    Accepts a filename, a text stream (when encoding == "unicode"), or a
    binary stream, which is transparently wrapped in a TextIOWrapper.
    """
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        f = open(
            file_or_filename,
            "w",
            encoding="utf-8" if encoding == "unicode" else encoding,
            errors="xmlcharrefreplace",
        )
        with f:
            yield f.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            detach_buffer = False
            if isinstance(file_or_filename, io.BufferedIOBase):
                buf = file_or_filename
            elif isinstance(file_or_filename, io.RawIOBase):
                buf = io.BufferedWriter(file_or_filename)
                detach_buffer = True
            else:
                # This is to handle passed objects that aren't in the
                # IOBase hierarchy, but just have a write method
                buf = io.BufferedIOBase()
                buf.writable = lambda: True
                buf.write = write
                try:
                    # TextIOWrapper uses this methods to determine
                    # if BOM (for UTF-16, etc) should be added
                    buf.seekable = file_or_filename.seekable
                    buf.tell = file_or_filename.tell
                except AttributeError:
                    pass
            wrapper = io.TextIOWrapper(
                buf,
                encoding=encoding,
                errors="xmlcharrefreplace",
                newline="\n",
            )
            try:
                yield wrapper.write
            finally:
                # Keep the original file open when the TextIOWrapper and
                # the BufferedWriter are destroyed
                wrapper.detach()
                if detach_buffer:
                    buf.detach()
|
||||
|
||||
from xml.etree.ElementTree import _namespace_map
|
||||
|
||||
def _namespaces(elem):
    """Return ``(qnames, namespaces)`` lookup tables for serializing *elem*.

    ``qnames`` maps every qualified name used in the tree to its
    serialized ``prefix:local`` form; ``namespaces`` maps each namespace
    URI to the prefix chosen for it.
    """
    # identify namespaces used in this tree

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            qname = _tounicode(qname)
            if qname[:1] == "{":
                # '{uri}local' Clark notation: resolve or invent a prefix
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    else:
                        prefix = _tounicode(prefix)
                    # the reserved 'xml' prefix is never declared explicitly
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag  # default element
            else:
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
|
||||
|
||||
def _serialize_xml(write, elem, qnames, namespaces, **kwargs):
    """Recursively serialize *elem* as XML through the *write* callable.

    *qnames*/*namespaces* are the lookup tables built by _namespaces();
    *namespaces* is only non-None for the root call, so xmlns attributes
    are emitted once, on the outermost element.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _tounicode(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _tounicode(text))
    else:
        tag = qnames[_tounicode(tag) if tag is not None else None]
        if tag is None:
            # elements with a None tag serialize only their content
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None)
        else:
            write("<" + tag)
            if namespaces:
                for uri, prefix in sorted(
                    namespaces.items(), key=lambda x: x[1]
                ):  # sort on prefix
                    if prefix:
                        prefix = ":" + prefix
                    write(' xmlns%s="%s"' % (prefix, _escape_attrib(uri)))
            attrs = elem.attrib
            if attrs:
                # try to keep existing attrib order
                if len(attrs) <= 1 or type(attrs) is _Attrib:
                    items = attrs.items()
                else:
                    # if plain dict, use lexical order
                    items = sorted(attrs.items())
                for k, v in items:
                    if isinstance(k, QName):
                        k = _tounicode(k.text)
                    else:
                        k = _tounicode(k)
                    if isinstance(v, QName):
                        v = qnames[_tounicode(v.text)]
                    else:
                        v = _escape_attrib(v)
                    write(' %s="%s"' % (qnames[k], v))
            # an element with text or children needs an explicit close tag;
            # note text == "" (not None) also forces <tag></tag>
            if text is not None or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None)
                write("</" + tag + ">")
            else:
                write("/>")
    if elem.tail:
        write(_escape_cdata(elem.tail))
|
||||
|
||||
def _raise_serialization_error(text):
|
||||
raise TypeError(
|
||||
"cannot serialize %r (type %s)" % (text, type(text).__name__)
|
||||
)
|
||||
|
||||
def _escape_cdata(text):
    """Escape character data for XML output.

    Replaces '&', '<' and '>' with their predefined XML entities.
    Raises TypeError (via _raise_serialization_error) for values that
    are not text.

    BUG FIX: the replacements had degenerated into identity no-ops
    (e.g. replacing "&" with "&") -- the entity references were lost,
    producing malformed XML for any text containing markup characters.
    """
    # escape character data
    try:
        text = _tounicode(text)
        # it's worth avoiding do-nothing calls for short strings
        # NOTE: '&' must be escaped first, or it would double-escape
        # the entities introduced by the later replacements
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
|
||||
|
||||
def _escape_attrib(text):
    """Escape an attribute value for XML output.

    Replaces '&', '<', '>' and '"' with their predefined XML entities,
    and newlines with the '&#10;' character reference so they survive
    attribute-value normalization on re-parse.

    BUG FIX: the replacements had degenerated into identity/garbage
    no-ops (entity references lost), producing malformed XML for any
    attribute containing markup characters or quotes.
    """
    # escape attribute value
    try:
        text = _tounicode(text)
        # NOTE: '&' must be escaped first to avoid double-escaping
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if '"' in text:
            text = text.replace('"', "&quot;")
        if "\n" in text:
            text = text.replace("\n", "&#10;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
|
||||
|
||||
def _indent(elem, level=0):
|
||||
# From http://effbot.org/zone/element-lib.htm#prettyprint
|
||||
i = "\n" + level * " "
|
||||
if len(elem):
|
||||
if not elem.text or not elem.text.strip():
|
||||
elem.text = i + " "
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
for elem in elem:
|
||||
_indent(elem, level + 1)
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
else:
|
||||
if level and (not elem.tail or not elem.tail.strip()):
|
||||
elem.tail = i
|
@ -2,7 +2,7 @@
|
||||
User name to file name conversion.
|
||||
This was taken from the UFO 3 spec.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from fontTools.misc.py23 import basestring, unicode
|
||||
|
||||
|
||||
|
@ -11,7 +11,7 @@ in a folder. It offers two ways to read glyph data, and one way to write
|
||||
glyph data. See the class doc string for details.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
from io import BytesIO, open
|
||||
from warnings import warn
|
||||
@ -22,8 +22,8 @@ from ufoLib.pointPen import AbstractPointPen, PointToSegmentPen
|
||||
from ufoLib.filenames import userNameToFileName
|
||||
from ufoLib.validators import isDictEnough, genericTypeValidator, colorValidator,\
|
||||
guidelinesValidator, anchorsValidator, identifierValidator, imageValidator, glyphLibValidator
|
||||
from ufoLib import etree
|
||||
|
||||
from lxml import etree
|
||||
|
||||
__all__ = [
|
||||
"GlyphSet",
|
||||
|
@ -1,3 +1,6 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
|
||||
def lookupKerningValue(pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None):
|
||||
"""
|
||||
Note: This expects kerning to be a flat dictionary
|
||||
|
@ -8,8 +8,13 @@ from numbers import Integral
|
||||
try:
|
||||
from functools import singledispatch
|
||||
except ImportError:
|
||||
from singledispatch import singledispatch
|
||||
from lxml import etree
|
||||
try:
|
||||
from singledispatch import singledispatch
|
||||
except ImportError:
|
||||
singledispatch = None
|
||||
|
||||
from ufoLib import etree
|
||||
|
||||
from fontTools.misc.py23 import (
|
||||
unicode,
|
||||
basestring,
|
||||
@ -73,7 +78,7 @@ class PlistTarget(object):
|
||||
It is based on the CPython plistlib module's _PlistParser class,
|
||||
but does not use the expat parser.
|
||||
|
||||
>>> from lxml import etree
|
||||
>>> from ufoLib import etree
|
||||
>>> parser = etree.XMLParser(target=PlistTarget())
|
||||
>>> result = etree.XML(
|
||||
... "<dict>"
|
||||
@ -208,23 +213,16 @@ _TARGET_END_HANDLERS = {
|
||||
"date": end_date,
|
||||
}
|
||||
|
||||
# single-dispatch generic function and overloaded implementations based
|
||||
# on the type of argument, to build an element tree from a plist data
|
||||
|
||||
# functions to build element tree from plist data
|
||||
|
||||
|
||||
@singledispatch
|
||||
def _make_element(value, ctx):
|
||||
raise TypeError("unsupported type: %s" % type(value))
|
||||
|
||||
|
||||
@_make_element.register(unicode)
|
||||
def _unicode_element(value, ctx):
|
||||
el = etree.Element("string")
|
||||
el.text = value
|
||||
return el
|
||||
|
||||
|
||||
@_make_element.register(bool)
|
||||
def _bool_element(value, ctx):
|
||||
if value:
|
||||
return etree.Element("true")
|
||||
@ -232,7 +230,6 @@ def _bool_element(value, ctx):
|
||||
return etree.Element("false")
|
||||
|
||||
|
||||
@_make_element.register(Integral)
|
||||
def _integer_element(value, ctx):
|
||||
if -1 << 63 <= value < 1 << 64:
|
||||
el = etree.Element("integer")
|
||||
@ -242,14 +239,12 @@ def _integer_element(value, ctx):
|
||||
raise OverflowError(value)
|
||||
|
||||
|
||||
@_make_element.register(float)
|
||||
def _float_element(value, ctx):
|
||||
el = etree.Element("real")
|
||||
el.text = repr(value)
|
||||
return el
|
||||
|
||||
|
||||
@_make_element.register(dict)
|
||||
def _dict_element(d, ctx):
|
||||
el = etree.Element("dict")
|
||||
items = d.items()
|
||||
@ -268,8 +263,6 @@ def _dict_element(d, ctx):
|
||||
return el
|
||||
|
||||
|
||||
@_make_element.register(list)
|
||||
@_make_element.register(tuple)
|
||||
def _array_element(array, ctx):
|
||||
el = etree.Element("array")
|
||||
if len(array) == 0:
|
||||
@ -281,15 +274,12 @@ def _array_element(array, ctx):
|
||||
return el
|
||||
|
||||
|
||||
@_make_element.register(datetime)
|
||||
def _date_element(date, ctx):
|
||||
el = etree.Element("date")
|
||||
el.text = _date_to_string(date)
|
||||
return el
|
||||
|
||||
|
||||
@_make_element.register(bytes)
|
||||
@_make_element.register(bytearray)
|
||||
def _data_element(data, ctx):
|
||||
data = b64encode(data)
|
||||
if data and ctx.pretty_print:
|
||||
@ -307,6 +297,49 @@ def _data_element(data, ctx):
|
||||
return el
|
||||
|
||||
|
||||
# if singledispatch is available, we use a generic '_make_element' function
# and register overloaded implementations that are run based on the type of
# the first argument

if singledispatch is not None:

    @singledispatch
    def _make_element(value, ctx):
        """Build a plist element for *value*; dispatches on value's type."""
        raise TypeError("unsupported type: %s" % type(value))

    _make_element.register(unicode)(_unicode_element)
    _make_element.register(bool)(_bool_element)
    _make_element.register(Integral)(_integer_element)
    _make_element.register(float)(_float_element)
    _make_element.register(dict)(_dict_element)
    _make_element.register(list)(_array_element)
    _make_element.register(tuple)(_array_element)
    _make_element.register(datetime)(_date_element)
    _make_element.register(bytes)(_data_element)
    _make_element.register(bytearray)(_data_element)

else:
    # otherwise we use a long switch-like if statement

    def _make_element(value, ctx):
        """Build a plist element for *value*; mirrors the singledispatch
        registrations above.
        """
        # NOTE: bool must be tested before Integral since bool subclasses
        # int; the order below matches singledispatch resolution
        if isinstance(value, unicode):
            return _unicode_element(value, ctx)
        elif isinstance(value, bool):
            return _bool_element(value, ctx)
        elif isinstance(value, Integral):
            return _integer_element(value, ctx)
        elif isinstance(value, float):
            return _float_element(value, ctx)
        elif isinstance(value, dict):
            return _dict_element(value, ctx)
        elif isinstance(value, (list, tuple)):
            return _array_element(value, ctx)
        elif isinstance(value, datetime):
            return _date_element(value, ctx)
        elif isinstance(value, (bytes, bytearray)):
            return _data_element(value, ctx)
        else:
            # BUG FIX: previously fell through and silently returned None
            # for unsupported types; raise like the singledispatch variant
            raise TypeError("unsupported type: %s" % type(value))
|
||||
|
||||
|
||||
# Public functions to create element tree from plist-compatible python
|
||||
# data structures and vice versa, for use when (de)serializing GLIF xml.
|
||||
|
||||
@ -347,7 +380,13 @@ def load(fp, dict_type=dict):
|
||||
)
|
||||
target = PlistTarget(dict_type=dict_type)
|
||||
parser = etree.XMLParser(target=target)
|
||||
return etree.parse(fp, parser=parser)
|
||||
result = etree.parse(fp, parser=parser)
|
||||
# lxml returns the target object directly, while ElementTree wraps
|
||||
# it as the root of an ElementTree object
|
||||
try:
|
||||
return result.getroot()
|
||||
except AttributeError:
|
||||
return result
|
||||
|
||||
|
||||
def loads(value, dict_type=dict):
|
||||
@ -369,6 +408,9 @@ def dump(value, fp, sort_keys=True, skipkeys=False, pretty_print=True):
|
||||
)
|
||||
root.append(el)
|
||||
tree = etree.ElementTree(root)
|
||||
# we write the doctype ourselves instead of using the 'doctype' argument
|
||||
# of 'write' method, because lxml will force adding a '\n' even when
|
||||
# pretty_print is False.
|
||||
if pretty_print:
|
||||
header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b""))
|
||||
else:
|
||||
|
@ -11,6 +11,7 @@ steps through all the points in a call from glyph.drawPoints().
|
||||
This allows the caller to provide more data for each point.
|
||||
For instance, whether or not a point is smooth, and its name.
|
||||
"""
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from fontTools.pens.basePen import AbstractPen
|
||||
import math
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
"""Miscellaneous helpers for our test suite."""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import sys
|
||||
import os
|
||||
import unittest
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import unittest
|
||||
from ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
|
||||
from ufoLib.test.testSupport import Glyph, stripText
|
||||
|
@ -1,4 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import unittest
|
||||
from ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
|
||||
from ufoLib.test.testSupport import Glyph, stripText
|
||||
|
@ -1,4 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import unittest
|
||||
|
@ -1,4 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import unittest
|
||||
|
@ -1,10 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import unittest
|
||||
import tempfile
|
||||
from io import open
|
||||
from fontTools.misc.py23 import unicode
|
||||
from ufoLib import UFOReader, UFOWriter, UFOLibError
|
||||
from ufoLib.glifLib import GlifLibError
|
||||
from ufoLib import plistlib
|
||||
@ -4130,6 +4131,28 @@ class UFO3WriteLayersTestCase(unittest.TestCase):
|
||||
writer = UFOWriter(self.ufoPath)
|
||||
self.assertRaises(UFOLibError, writer.deleteGlyphSet, "does not exist")
|
||||
|
||||
def testWriteAsciiLayerOrder(self):
|
||||
self.makeUFO(
|
||||
layerContents=[
|
||||
["public.default", "glyphs"],
|
||||
["layer 1", "glyphs.layer 1"],
|
||||
["layer 2", "glyphs.layer 2"],
|
||||
]
|
||||
)
|
||||
writer = UFOWriter(self.ufoPath)
|
||||
# if passed bytes string, it'll be decoded to ASCII unicode string
|
||||
writer.writeLayerContents(["public.default", "layer 2", b"layer 1"])
|
||||
path = os.path.join(self.ufoPath, "layercontents.plist")
|
||||
with open(path, "rb") as f:
|
||||
result = plistlib.load(f)
|
||||
expected = [
|
||||
["public.default", "glyphs"],
|
||||
["layer 2", "glyphs.layer 2"],
|
||||
["layer 1", "glyphs.layer 1"],
|
||||
]
|
||||
self.assertEqual(expected, result)
|
||||
for layerName, directory in result:
|
||||
assert isinstance(layerName, unicode)
|
||||
|
||||
# -----
|
||||
# /data
|
||||
|
@ -1,4 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import unittest
|
||||
|
55
Lib/ufoLib/test/test_etree.py
Normal file
55
Lib/ufoLib/test/test_etree.py
Normal file
@ -0,0 +1,55 @@
|
||||
# coding: utf-8
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from ufoLib import etree
|
||||
from collections import OrderedDict
|
||||
import io
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "xml",
    [
        # minimal document with no whitespace between elements
        (
            "<root>"
            '<element key="value">text</element>'
            "<element>text</element>tail"
            "<empty-element/>"
            "</root>"
        ),
        # same document with newlines and indentation mixed into text/tails
        (
            "<root>\n"
            ' <element key="value">text</element>\n'
            " <element>text</element>tail\n"
            " <empty-element/>\n"
            "</root>"
        ),
        # non-ASCII text content plus a reserved 'xml:' namespace attribute
        (
            '<axis default="400" maximum="1000" minimum="1" name="weight" tag="wght">'
            '<labelname xml:lang="fa-IR">قطر</labelname>'
            "</axis>"
        ),
    ],
    ids=["simple_xml_no_indent", "simple_xml_indent", "xml_ns_attrib_utf_8"],
)
def test_roundtrip_string(xml):
    """Parsing then serializing must reproduce the input exactly."""
    root = etree.fromstring(xml.encode("utf-8"))
    result = etree.tostring(root, encoding="utf-8").decode("utf-8")
    assert result == xml
|
||||
|
||||
|
||||
def test_pretty_print():
    """tostring(pretty_print=True) indents children by two spaces and
    preserves OrderedDict attribute insertion order.

    BUG FIX: the expected indentation had collapsed to single spaces;
    restored to the two-space unit produced by the etree shim's
    _indent() (and by lxml's pretty_print).
    """
    root = etree.Element("root")
    attrs = OrderedDict([("c", "2"), ("b", "1"), ("a", "0")])
    etree.SubElement(root, "element", attrs).text = "text"
    etree.SubElement(root, "element").text = "text"
    root.append(etree.Element("empty-element"))

    result = etree.tostring(root, encoding="unicode", pretty_print=True)

    assert result == (
        "<root>\n"
        '  <element c="2" b="1" a="0">text</element>\n'
        "  <element>text</element>\n"
        "  <empty-element/>\n"
        "</root>\n"
    )
|
@ -1,4 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import unittest
|
||||
from ufoLib.filenames import userNameToFileName, handleClash1, handleClash2
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
|
@ -1,15 +1,27 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from ufoLib import plistlib
|
||||
import sys
|
||||
import os
|
||||
import datetime
|
||||
import codecs
|
||||
import collections
|
||||
from io import BytesIO
|
||||
from numbers import Integral
|
||||
from lxml import etree
|
||||
from fontTools.misc.py23 import tounicode
|
||||
from ufoLib import etree
|
||||
from ufoLib import plistlib
|
||||
import pytest
|
||||
|
||||
|
||||
PY2 = sys.version_info < (3,)
|
||||
if PY2:
|
||||
# This is a ResourceWarning that only happens on py27 at interpreter
|
||||
# finalization, and only when coverage is enabled. We can ignore it.
|
||||
# https://github.com/numpy/numpy/issues/3778#issuecomment-24885336
|
||||
pytestmark = pytest.mark.filterwarnings(
|
||||
"ignore:tp_compare didn't return -1 or -2 for exception"
|
||||
)
|
||||
|
||||
|
||||
# The testdata is generated using https://github.com/python/cpython/...
|
||||
# Mac/Tools/plistlib_generate_testdata.py
|
||||
# which uses PyObjC to control the Cocoa classes for generating plists
|
||||
@ -271,11 +283,11 @@ def test_controlcharacters():
|
||||
if i >= 32 or c in "\r\n\t":
|
||||
# \r, \n and \t are the only legal control chars in XML
|
||||
data = plistlib.dumps(testString)
|
||||
# the stdlib's plistlib writer always replaces \r with \n
|
||||
# inside string values; we don't (the ctrl character is
|
||||
# escaped by lxml, so it roundtrips)
|
||||
# if c != "\r":
|
||||
assert plistlib.loads(data) == testString
|
||||
# the stdlib's plistlib writer, as well as the elementtree
|
||||
# parser, always replace \r with \n inside string values;
|
||||
# lxml doesn't (the ctrl character is escaped), so it roundtrips
|
||||
if c != "\r" or etree._have_lxml:
|
||||
assert plistlib.loads(data) == testString
|
||||
else:
|
||||
with pytest.raises(ValueError):
|
||||
plistlib.dumps(testString)
|
||||
@ -340,8 +352,9 @@ def test_invalidreal():
|
||||
(b"utf-8", "utf-8", codecs.BOM_UTF8),
|
||||
(b"utf-16", "utf-16-le", codecs.BOM_UTF16_LE),
|
||||
(b"utf-16", "utf-16-be", codecs.BOM_UTF16_BE),
|
||||
(b"utf-32", "utf-32-le", codecs.BOM_UTF32_LE),
|
||||
(b"utf-32", "utf-32-be", codecs.BOM_UTF32_BE),
|
||||
# expat parser (used by ElementTree) does't support UTF-32
|
||||
# (b"utf-32", "utf-32-le", codecs.BOM_UTF32_LE),
|
||||
# (b"utf-32", "utf-32-be", codecs.BOM_UTF32_BE),
|
||||
],
|
||||
)
|
||||
def test_xml_encodings(pl, xml_encoding, encoding, bom):
|
||||
@ -359,7 +372,7 @@ def test_fromtree(pl):
|
||||
|
||||
def _strip(txt):
|
||||
return (
|
||||
"".join(l.strip() for l in txt.splitlines())
|
||||
"".join(l.strip() for l in tounicode(txt, "utf-8").splitlines())
|
||||
if txt is not None
|
||||
else ""
|
||||
)
|
||||
@ -380,9 +393,9 @@ def test_totree(pl):
|
||||
def test_no_pretty_print():
|
||||
data = plistlib.dumps({"data": b"hello"}, pretty_print=False)
|
||||
assert data == (
|
||||
plistlib.XML_DECLARATION +
|
||||
plistlib.PLIST_DOCTYPE +
|
||||
b'<plist version="1.0">'
|
||||
plistlib.XML_DECLARATION
|
||||
+ plistlib.PLIST_DOCTYPE
|
||||
+ b'<plist version="1.0">'
|
||||
b"<dict>"
|
||||
b"<key>data</key>"
|
||||
b"<data>aGVsbG8=</data>"
|
||||
|
@ -1,6 +1,7 @@
|
||||
"""The module contains miscellaneous helpers.
|
||||
It's not considered part of the public ufoLib API.
|
||||
"""
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import warnings
|
||||
import functools
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
"""Various low level data validators."""
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import os
|
||||
import calendar
|
||||
from io import open
|
||||
@ -951,21 +952,26 @@ def fontLibValidator(value):
|
||||
>>> fontLibValidator(lib)
|
||||
(True, None)
|
||||
|
||||
>>> lib = {"public.glyphOrder" : [u"A", u"C", u"B"]}
|
||||
>>> fontLibValidator(lib)
|
||||
(True, None)
|
||||
|
||||
>>> lib = "hello"
|
||||
>>> fontLibValidator(lib)
|
||||
(False, 'The lib data is not in the correct format: expected a dictionary, found str')
|
||||
>>> valid, msg = fontLibValidator(lib)
|
||||
>>> valid
|
||||
False
|
||||
>>> print(msg) # doctest: +ELLIPSIS
|
||||
The lib data is not in the correct format: expected a dictionary, ...
|
||||
|
||||
>>> lib = {1: "hello"}
|
||||
>>> fontLibValidator(lib)
|
||||
(False, 'The lib key is not properly formatted: expected basestring, found int: 1')
|
||||
>>> valid, msg = fontLibValidator(lib)
|
||||
>>> valid
|
||||
False
|
||||
>>> print(msg)
|
||||
The lib key is not properly formatted: expected basestring, found int: 1
|
||||
|
||||
>>> lib = {"public.glyphOrder" : "hello"}
|
||||
>>> fontLibValidator(lib)
|
||||
(False, 'public.glyphOrder is not properly formatted: expected list or tuple, found str')
|
||||
>>> valid, msg = fontLibValidator(lib)
|
||||
>>> valid
|
||||
False
|
||||
>>> print(msg) # doctest: +ELLIPSIS
|
||||
public.glyphOrder is not properly formatted: expected list or tuple,...
|
||||
|
||||
>>> lib = {"public.glyphOrder" : ["A", 1, "B"]}
|
||||
>>> fontLibValidator(lib)
|
||||
|
14
README.md
14
README.md
@ -10,6 +10,18 @@ A low-level [UFO] reader and writer.
|
||||
|
||||
[UFO] is a human-readable, XML-based file format that stores font source files.
|
||||
|
||||
### Installation
|
||||
|
||||
```sh
|
||||
$ pip install ufoLib
|
||||
```
|
||||
|
||||
For better speed, you can install with extra dependencies like this:
|
||||
|
||||
```sh
|
||||
$ pip install ufoLib[lxml]
|
||||
```
|
||||
|
||||
[UFO]: http://unifiedfontobject.org/
|
||||
|
||||
|
||||
@ -19,4 +31,4 @@ UFO 4
|
||||
This is a branch in which we are **experimenting** with UFO 4 ideas/support.
|
||||
|
||||
- Single file structure (99.9% sure this will be zip)
|
||||
- Other stuff.
|
||||
- Other stuff.
|
||||
|
52
appveyor.yml
52
appveyor.yml
@ -4,38 +4,24 @@ environment:
|
||||
- PYTHON: "C:\\Python27"
|
||||
PYTHON_VERSION: "2.7.x"
|
||||
PYTHON_ARCH: "32"
|
||||
TOXENV: "py27"
|
||||
TOXPYTHON: "C:\\Python27\\python.exe"
|
||||
|
||||
- PYTHON: "C:\\Python35"
|
||||
PYTHON_VERSION: "3.5.x"
|
||||
PYTHON_ARCH: "32"
|
||||
TOXENV: "py35"
|
||||
TOXPYTHON: "C:\\Python35\\python.exe"
|
||||
|
||||
- PYTHON: "C:\\Python36"
|
||||
PYTHON_VERSION: "3.6.x"
|
||||
PYTHON_ARCH: "32"
|
||||
TOXENV: "py36"
|
||||
TOXPYTHON: "C:\\Python36\\python.exe"
|
||||
|
||||
- PYTHON: "C:\\Python27-x64"
|
||||
PYTHON_VERSION: "2.7.x"
|
||||
PYTHON_ARCH: "64"
|
||||
TOXENV: "py27"
|
||||
TOXPYTHON: "C:\\Python27-x64\\python.exe"
|
||||
|
||||
- PYTHON: "C:\\Python35-x64"
|
||||
PYTHON_VERSION: "3.5.x"
|
||||
PYTHON_ARCH: "64"
|
||||
TOXENV: "py35"
|
||||
TOXPYTHON: "C:\\Python35-x64\\python.exe"
|
||||
TOXENV: "py27-cov,py27-cov-lxml"
|
||||
|
||||
- PYTHON: "C:\\Python36-x64"
|
||||
PYTHON_VERSION: "3.6.x"
|
||||
PYTHON_ARCH: "64"
|
||||
TOXENV: "py36"
|
||||
TOXPYTHON: "C:\\Python36-x64\\python.exe"
|
||||
TOXENV: "py36-cov,py36-cov-lxml"
|
||||
|
||||
- PYTHON: "C:\\Python37-x64"
|
||||
PYTHON_VERSION: "3.7.x"
|
||||
PYTHON_ARCH: "64"
|
||||
# lxml doesn't have windows 3.7 wheels yet
|
||||
# TOXENV: "py37-cov,py37-cov-lxml"
|
||||
TOXENV: "py37-cov"
|
||||
|
||||
skip_branch_with_pr: true
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
init:
|
||||
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
|
||||
@ -61,7 +47,17 @@ install:
|
||||
# install tox to run test suite in a virtual environment
|
||||
- "pip install -U tox"
|
||||
|
||||
# Make a python3.7.bat file in the current directory so that tox will find it
|
||||
# and python3.7 will mean what we want it to. E.g. for TOXENV=py27, this will
|
||||
# save a python2.7.bat file containing "@C:\Python27\python %*"
|
||||
# Credit: https://nedbatchelder.com/blog/201509/appveyor.html
|
||||
- "python -c \"import os; open('python{0}.{1}.bat'.format(*os.environ['TOXENV'][2:]), 'w').write('@{0}\\\\python \\x25*\\n'.format(os.environ['PYTHON']))\""
|
||||
|
||||
build: false
|
||||
|
||||
test_script:
|
||||
- "tox"
|
||||
|
||||
on_success:
|
||||
# upload test coverage to codecov.io
|
||||
- tox -e codecov
|
||||
|
2
extra_requirements.txt
Normal file
2
extra_requirements.txt
Normal file
@ -0,0 +1,2 @@
|
||||
lxml==4.2.3
|
||||
singledispatch==3.4.0.3; python_version < '3.4'
|
@ -1,3 +1 @@
|
||||
fonttools==3.28.0
|
||||
lxml==4.2.3
|
||||
singledispatch==3.4.0.3; python_version < '3.4'
|
||||
|
@ -39,9 +39,11 @@ license_file = LICENSE.txt
|
||||
[tool:pytest]
|
||||
minversion = 3.0.2
|
||||
testpaths =
|
||||
Lib/ufoLib
|
||||
ufoLib
|
||||
doctest_optionflags = ALLOW_UNICODE ALLOW_BYTES
|
||||
addopts =
|
||||
-v
|
||||
-r a
|
||||
--doctest-modules
|
||||
--doctest-ignore-import-errors
|
||||
--pyargs
|
||||
|
||||
|
13
setup.py
13
setup.py
@ -166,9 +166,18 @@ setup_params = dict(
|
||||
],
|
||||
install_requires=[
|
||||
"fonttools >= 3.1.2, < 4",
|
||||
"lxml >= 4.0, < 5",
|
||||
"singledispatch >= 3.4.0.3, < 4; python_version < '3.4'",
|
||||
],
|
||||
extras_require={
|
||||
"lxml": [
|
||||
"lxml >= 4.0, < 5",
|
||||
"singledispatch >= 3.4.0.3, < 4; python_version < '3.4'",
|
||||
],
|
||||
"testing": [
|
||||
"pytest >= 3.0.0, <4",
|
||||
"pytest-cov >= 2.5.1, <3",
|
||||
"pytest-randomly >= 1.2.3, <2",
|
||||
],
|
||||
},
|
||||
cmdclass={
|
||||
"release": release,
|
||||
"bump_version": bump_version,
|
||||
|
75
tox.ini
75
tox.ini
@ -1,16 +1,71 @@
|
||||
[tox]
|
||||
envlist = py27, py35, py36
|
||||
envlist = py{27,36,37}-cov,py{27,36,37}-cov-lxml,coverage
|
||||
minversion = 2.9.1
|
||||
skip_missing_interpreters = true
|
||||
|
||||
[testenv]
|
||||
basepython =
|
||||
# we use TOXPYTHON env variable to specify the location of Appveyor Python
|
||||
py27: {env:TOXPYTHON:python2.7}
|
||||
py35: {env:TOXPYTHON:python3.5}
|
||||
py36: {env:TOXPYTHON:python3.6}
|
||||
description = run the tests with pytest under {basepython}
|
||||
setenv =
|
||||
COVERAGE_FILE={toxinidir}/.coverage.{envname}
|
||||
deps =
|
||||
pytest
|
||||
-rrequirements.txt
|
||||
lxml: -rextra_requirements.txt
|
||||
extras = testing
|
||||
commands =
|
||||
# run the test suite against the package installed inside tox env.
|
||||
# any extra positional arguments after `tox -- ...` are passed on to pytest
|
||||
pytest {posargs:--pyargs ufoLib}
|
||||
python --version
|
||||
python -c "import struct; print('%s-bit' % (struct.calcsize('P') * 8))"
|
||||
nocov: pytest {posargs}
|
||||
cov: pytest --cov="{envsitepackagesdir}/ufoLib" --cov-config={toxinidir}/.coveragerc {posargs}
|
||||
|
||||
[testenv:coverage]
|
||||
description = run locally after tests to combine coverage data and create reports;
|
||||
generates a diff coverage against origin/master (or DIFF_AGAINST env var)
|
||||
deps =
|
||||
coverage >= 4.4.1, < 5
|
||||
diff_cover
|
||||
skip_install = true
|
||||
setenv =
|
||||
COVERAGE_FILE={toxinidir}/.coverage
|
||||
passenv =
|
||||
DIFF_AGAINST
|
||||
changedir = {toxinidir}
|
||||
commands =
|
||||
coverage erase
|
||||
coverage combine
|
||||
coverage report
|
||||
coverage xml -o {toxworkdir}/coverage.xml
|
||||
coverage html
|
||||
diff-cover --compare-branch {env:DIFF_AGAINST:origin/master} {toxworkdir}/coverage.xml
|
||||
|
||||
[testenv:codecov]
|
||||
description = upload coverage data to codecov (only run on CI)
|
||||
deps =
|
||||
{[testenv:coverage]deps}
|
||||
codecov
|
||||
skip_install = true
|
||||
setenv = {[testenv:coverage]setenv}
|
||||
passenv = TOXENV CI TRAVIS TRAVIS_* APPVEYOR APPVEYOR_* CODECOV_*
|
||||
changedir = {toxinidir}
|
||||
commands =
|
||||
coverage combine
|
||||
codecov --env TOXENV
|
||||
|
||||
[testenv:sdist]
|
||||
description = build sdist to be uploaded to PyPI
|
||||
skip_install = true
|
||||
deps =
|
||||
setuptools >= 36.4.0
|
||||
wheel >= 0.31.0
|
||||
changedir = {toxinidir}
|
||||
commands =
|
||||
python -c 'import shutil; shutil.rmtree("dist", ignore_errors=True)'
|
||||
python setup.py sdist --dist-dir dist
|
||||
|
||||
[testenv:wheel]
|
||||
description = build wheel package for upload to PyPI
|
||||
skip_install = true
|
||||
deps = {[testenv:sdist]deps}
|
||||
changedir = {toxinidir}
|
||||
commands =
|
||||
{[testenv:sdist]commands}
|
||||
pip wheel -v --no-deps --no-index --no-cache-dir --wheel-dir {toxinidir}/dist --find-links {toxinidir}/dist ufoLib
|
||||
|
Loading…
x
Reference in New Issue
Block a user