Created a new library directory called "FreeLib". All OpenSource RFMKII components will reside there, fontTools being the flagship.
git-svn-id: svn://svn.code.sf.net/p/fonttools/code/trunk@2 4cde692c-a291-49d1-8350-778aa11640f8
This commit is contained in:
parent
a8f3feacb0
commit
7842e56b97
0
Lib/fontTools/__init__.py
Normal file
0
Lib/fontTools/__init__.py
Normal file
265
Lib/fontTools/afmLib.py
Normal file
265
Lib/fontTools/afmLib.py
Normal file
@ -0,0 +1,265 @@
|
|||||||
|
"""Module for reading and writing AFM files."""
|
||||||
|
|
||||||
|
# XXX reads AFM's generated by Fog, not tested with much else.
|
||||||
|
# It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics
|
||||||
|
# File Format Specification). Still, it should read most "common" AFM files.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import string
|
||||||
|
import types
|
||||||
|
|
||||||
|
__version__ = "$Id: afmLib.py,v 1.1 1999-12-16 21:34:51 Just Exp $"
|
||||||
|
|
||||||
|
|
||||||
|
# every single line starts with a "word"
identifierRE = re.compile(r"^([A-Za-z]+).*")

# regular expression to parse char lines.
# NOTE: the patterns are raw strings now; the originals relied on
# "\d"/"\s" surviving non-raw string literals, which is fragile
# (undefined/deprecated escape sequences).
charRE = re.compile(
	r"(-?\d+)"             # charnum
	r"\s*;\s*WX\s+"        # ; WX
	r"(\d+)"               # width
	r"\s*;\s*N\s+"         # ; N
	r"(\.?[A-Za-z0-9_]+)"  # charname
	r"\s*;\s*B\s+"         # ; B
	r"(-?\d+)"             # left
	r"\s+"                 #
	r"(-?\d+)"             # bottom
	r"\s+"                 #
	r"(-?\d+)"             # right
	r"\s+"                 #
	r"(-?\d+)"             # top
	r"\s*;\s*"             # ;
	)

# regular expression to parse kerning lines
kernRE = re.compile(
	r"([.A-Za-z0-9_]+)"    # leftchar
	r"\s+"                 #
	r"([.A-Za-z0-9_]+)"    # rightchar
	r"\s+"                 #
	r"(-?\d+)"             # value
	r"\s*"                 #
	)

# old-style string exception identifier; kept as a string for backward
# compatibility with existing 'except error:' / 'raise error, msg' users
error = "AFM.error"
|
||||||
|
|
||||||
|
class AFM:
	
	"""In-memory representation of an AFM (Adobe Font Metrics) file.
	
	Parsed data lives in three mappings:
	  _attrs   -- global font attributes, keyed by AFM keyword
	  _chars   -- charname -> (charnum, width, (left, bottom, right, top))
	  _kerning -- (leftchar, rightchar) -> kern value
	
	Global attributes are also reachable as plain instance attributes
	(see __getattr__/__setattr__); char metrics and kern pairs are
	reachable through __getitem__.
	"""
	
	# structural keywords that read() recognizes and silently skips
	_keywords = ['StartFontMetrics',
			'EndFontMetrics',
			'StartCharMetrics',
			'EndCharMetrics',
			'StartKernData',
			'StartKernPairs',
			'EndKernPairs',
			'EndKernData', ]
	
	def __init__(self, path = None):
		"""Create an empty AFM object; if 'path' is given, read that file."""
		# NOTE: the assignments below go through __setattr__, which stores
		# names starting with "_" directly in __dict__.
		self._attrs = {}
		self._chars = {}
		self._kerning = {}
		self._index = {}	# XXX appears to be unused in this module
		self._comments = []
		if path is not None:
			self.read(path)
	
	def read(self, path):
		"""Parse the AFM file at 'path' into this object.
		
		Raises 'error' (a string exception) on lines that do not start
		with an identifier word.
		"""
		lines = readlines(path)
		for line in lines:
			if not string.strip(line):
				continue
			m = identifierRE.match(line)
			if m is None:
				raise error, "syntax error in AFM file: " + `line`
			
			# split the line into the leading keyword and the rest
			pos = m.regs[1][1]
			word = line[:pos]
			rest = string.strip(line[pos:])
			if word in self._keywords:
				continue	# structural keyword, nothing to parse
			if word == 'C':
				# char metrics line
				self.parsechar(rest)
			elif word == "KPX":
				# kerning pair line
				self.parsekernpair(rest)
			else:
				# any other line is treated as a global attribute
				self.parseattr(word, rest)
	
	def parsechar(self, rest):
		"""Parse the part of a char metrics ('C') line after the keyword."""
		m = charRE.match(rest)
		if m is None:
			raise error, "syntax error in AFM file: " + `rest`
		# collect the matched groups via the raw match regions
		things = []
		for fr, to in m.regs[1:]:
			things.append(rest[fr:to])
		# group 2 is the (non-numeric) glyph name; pull it out so the
		# remaining fields can be converted to ints in one go
		charname = things[2]
		del things[2]
		charnum, width, l, b, r, t = map(string.atoi, things)
		self._chars[charname] = charnum, width, (l, b, r, t)
	
	def parsekernpair(self, rest):
		"""Parse the part of a kerning ('KPX') line after the keyword."""
		m = kernRE.match(rest)
		if m is None:
			raise error, "syntax error in AFM file: " + `rest`
		things = []
		for fr, to in m.regs[1:]:
			things.append(rest[fr:to])
		leftchar, rightchar, value = things
		value = string.atoi(value)
		self._kerning[(leftchar, rightchar)] = value
	
	def parseattr(self, word, rest):
		"""Store a global attribute line; integer values are converted,
		anything unconvertible is kept as a string."""
		if word == "FontBBox":
			# FontBBox is four whitespace-separated integers
			l, b, r, t = map(string.atoi, string.split(rest))
			self._attrs[word] = l, b, r, t
		elif word == "Comment":
			self._comments.append(rest)
		else:
			try:
				value = string.atoi(rest)
			except (ValueError, OverflowError):
				self._attrs[word] = rest
			else:
				self._attrs[word] = value
	
	def write(self, path, sep = '\r'):
		"""Write the AFM data to 'path'; 'sep' is the line terminator
		(default: Mac-style CR)."""
		import time
		lines = [ "StartFontMetrics 2.0",
				"Comment Generated by afmLib, version %s; at %s" %
					(string.split(__version__)[2],
					time.strftime("%m/%d/%Y %H:%M:%S",
					time.localtime(time.time())))]
		
		# write attributes
		items = self._attrs.items()
		items.sort()	# XXX proper ordering???
		for attr, value in items:
			if attr == "FontBBox":
				value = string.join(map(str, value), " ")
			lines.append(attr + " " + str(value))
		
		# write char metrics, sorted by char code
		lines.append("StartCharMetrics " + `len(self._chars)`)
		# invert the mapping to (charnum, (charname, width, box)) so
		# sorting orders by char code
		items = map(lambda (charname, (charnum, width, box)):
				(charnum, (charname, width, box)),
				self._chars.items())
		
		def myCmp(a, b):
			"""Custom compare function to make sure unencoded chars (-1)
			end up at the end of the list after sorting."""
			if a[0] == -1:
				a = (0xffff,) + a[1:]  # 0xffff is an arbitrary large number
			if b[0] == -1:
				b = (0xffff,) + b[1:]
			return cmp(a, b)
		items.sort(myCmp)
		
		for charnum, (charname, width, (l, b, r, t)) in items:
			lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
					(charnum, width, charname, l, b, r, t))
		lines.append("EndCharMetrics")
		
		# write kerning info
		lines.append("StartKernData")
		lines.append("StartKernPairs " + `len(self._kerning)`)
		items = self._kerning.items()
		items.sort()	# XXX is order important?
		for (leftchar, rightchar), value in items:
			lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
		
		lines.append("EndKernPairs")
		lines.append("EndKernData")
		lines.append("EndFontMetrics")
		
		writelines(path, lines, sep)
	
	def has_kernpair(self, pair):
		"""Return true if the (leftchar, rightchar) pair has a kern value."""
		return self._kerning.has_key(pair)
	
	def kernpairs(self):
		"""Return all (leftchar, rightchar) kerning pairs."""
		return self._kerning.keys()
	
	def has_char(self, char):
		"""Return true if metrics are available for glyph name 'char'."""
		return self._chars.has_key(char)
	
	def chars(self):
		"""Return the names of all chars for which metrics are available."""
		return self._chars.keys()
	
	def comments(self):
		"""Return all Comment lines found in the AFM, as a list of strings."""
		return self._comments
	
	def __getattr__(self, attr):
		# expose global AFM attributes (e.g. afm.FontName) as attributes
		if self._attrs.has_key(attr):
			return self._attrs[attr]
		else:
			raise AttributeError, attr
	
	def __setattr__(self, attr, value):
		# all attrs *not* starting with "_" are considered to be AFM keywords
		if attr[:1] == "_":
			self.__dict__[attr] = value
		else:
			self._attrs[attr] = value
	
	def __getitem__(self, key):
		"""afm[(left, right)] -> kern value; afm[charname] -> char metrics."""
		if type(key) == types.TupleType:
			# key is a tuple, return the kernpair
			if self._kerning.has_key(key):
				return self._kerning[key]
			else:
				raise KeyError, "no kerning pair: " + str(key)
		else:
			# return the metrics instead
			if self._chars.has_key(key):
				return self._chars[key]
			else:
				raise KeyError, "metrics index " + str(key) + " out of range"
	
	def __repr__(self):
		if hasattr(self, "FullName"):
			return '<AFM object for %s>' % self.FullName
		else:
			return '<AFM object at %x>' % id(self)
|
||||||
|
|
||||||
|
|
||||||
|
def readlines(path):
	"""Read the text file at 'path' and return its lines as a list,
	regardless of whether the line endings are Mac (CR), Unix (LF)
	or DOS (CR LF) style."""
	f = open(path, 'rb')
	data = f.read()
	f.close()
	# read any text file, regardless whether it's formatted for Mac, Unix or Dos:
	# build the separator from whichever newline characters actually occur
	# (both present is assumed to mean DOS-style CR LF pairs)
	sep = ""
	if '\r' in data:
		sep = sep + '\r'	# mac or dos
	if '\n' in data:
		sep = sep + '\n'	# unix or dos
	return string.split(data, sep)
|
||||||
|
|
||||||
|
def writelines(path, lines, sep = '\r'):
	"""Write 'lines' to the file at 'path', terminating each line with
	'sep' (default: Mac-style CR)."""
	f = open(path, 'wb')
	for line in lines:
		f.write(line + sep)
	f.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
	# Interactive smoke test for classic MacPython only: let the user pick
	# an AFM file via a standard file dialog, then exercise the API.
	import macfs
	fss, ok = macfs.StandardGetFile('TEXT')
	if ok:
		path = fss.as_pathname()
		afm = AFM(path)
		char = 'A'
		if afm.has_char(char):
			print afm[char]	# print charnum, width and boundingbox
		pair = ('A', 'V')
		if afm.has_kernpair(pair):
			print afm[pair]	# print kerning value for pair
		print afm.Version	# various other afm entries have become attributes
		print afm.Weight
		# afm.comments() returns a list of all Comment lines found in the AFM
		print afm.comments()
		#print afm.chars()
		#print afm.kernpairs()
		print afm
		# round-trip: write the parsed data back out next to the original
		afm.write(path + ".xxx")
|
||||||
|
|
1189
Lib/fontTools/agl.py
Normal file
1189
Lib/fontTools/agl.py
Normal file
File diff suppressed because it is too large
Load Diff
459
Lib/fontTools/cffLib.py
Normal file
459
Lib/fontTools/cffLib.py
Normal file
@ -0,0 +1,459 @@
|
|||||||
|
"""cffLib.py -- read/write tools for Adobe CFF fonts."""
|
||||||
|
|
||||||
|
__version__ = "1.0b1"
|
||||||
|
__author__ = "jvr"
|
||||||
|
|
||||||
|
import struct, sstruct
|
||||||
|
import string
|
||||||
|
import types
|
||||||
|
import psCharStrings
|
||||||
|
|
||||||
|
|
||||||
|
cffHeaderFormat = """
|
||||||
|
major: B
|
||||||
|
minor: B
|
||||||
|
hdrSize: B
|
||||||
|
offSize: B
|
||||||
|
"""
|
||||||
|
|
||||||
|
class CFFFontSet:
	
	"""A set of CFF fonts as stored in a CFF table: a fixed header
	followed by the Name, Top DICT, String and Global Subr INDEXes.
	
	NOTE(review): compile() and fromXML() are not implemented yet; the
	bare 'XXXX'/'xxx' names below are intentional placeholders that
	raise NameError when reached.
	"""
	
	def __init__(self):
		self.fonts = {}		# font name -> CFFFont
	
	def decompile(self, data):
		"""Parse binary CFF data into CFFFont objects, one per font name."""
		# header: major, minor, hdrSize, offSize (4 bytes)
		sstruct.unpack(cffHeaderFormat, data[:4], self)
		assert self.major == 1 and self.minor == 0, \
				"unknown CFF format: %d.%d" % (self.major, self.minor)
		restdata = data[self.hdrSize:]
		
		# the four top-level INDEX structures, in file order
		self.fontNames, restdata = readINDEX(restdata)
		topDicts, restdata = readINDEX(restdata)
		strings, restdata = readINDEX(restdata)
		strings = IndexedStrings(strings)
		globalSubrs, restdata = readINDEX(restdata)
		self.GlobalSubrs = map(psCharStrings.T2CharString, globalSubrs)
		
		for i in range(len(topDicts)):
			font = self.fonts[self.fontNames[i]] = CFFFont()
			font.GlobalSubrs = self.GlobalSubrs  # Hmm.
			font.decompile(data, topDicts[i], strings, self)  # maybe only 'on demand'?
	
	def compile(self):
		# not implemented: reaching the bare name below raises NameError
		strings = IndexedStrings()
		XXXX
	
	def toXML(self, xmlWriter, progress=None):
		"""Write every font, then the global subroutines, as XML."""
		xmlWriter.newline()
		for fontName in self.fontNames:
			xmlWriter.begintag("CFFFont", name=fontName)
			xmlWriter.newline()
			font = self.fonts[fontName]
			font.toXML(xmlWriter, progress)
			xmlWriter.endtag("CFFFont")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.begintag("GlobalSubrs")
		xmlWriter.newline()
		for i in range(len(self.GlobalSubrs)):
			xmlWriter.newline()
			xmlWriter.begintag("CharString", id=i)
			xmlWriter.newline()
			self.GlobalSubrs[i].toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.endtag("GlobalSubrs")
		xmlWriter.newline()
		xmlWriter.newline()
	
	def fromXML(self, (name, attrs, content)):
		# not implemented: reaching the bare name below raises NameError
		xxx
|
||||||
|
|
||||||
|
|
||||||
|
class IndexedStrings:
	
	"""SID <-> string mapping for a CFF font.
	
	SIDs below cffStandardStringCount refer to the fixed standard
	strings; higher SIDs index into this object's own 'strings' list.
	"""
	
	def __init__(self, strings=None):
		if strings is None:
			strings = []
		self.strings = strings
	
	def __getitem__(self, SID):
		"""Return the string for 'SID' (standard or font-specific)."""
		if SID < cffStandardStringCount:
			return cffStandardStrings[SID]
		else:
			return self.strings[SID - cffStandardStringCount]
	
	def getSID(self, s):
		"""Return the SID for string 's', adding it to the font-specific
		string list if it is neither a standard string nor already known."""
		if not hasattr(self, "stringMapping"):
			self.buildStringMapping()
		if cffStandardStringMapping.has_key(s):
			# a standard string: it has a fixed SID and must not be
			# added to our own list.
			# BUGFIX: this used to be a separate 'if' followed by an
			# independent 'if/else' on self.stringMapping, so a standard
			# string not yet in self.strings fell through to the 'else'
			# branch, got appended to self.strings, and its standard SID
			# was overwritten with a new custom SID.
			SID = cffStandardStringMapping[s]
		elif self.stringMapping.has_key(s):
			SID = self.stringMapping[s]
		else:
			SID = len(self.strings) + cffStandardStringCount
			self.strings.append(s)
			self.stringMapping[s] = SID
		return SID
	
	def getStrings(self):
		"""Return the list of font-specific (non-standard) strings."""
		return self.strings
	
	def buildStringMapping(self):
		# lazily-built reverse mapping: string -> SID, for our own strings
		self.stringMapping = {}
		for index in range(len(self.strings)):
			self.stringMapping[self.strings[index]] = index + cffStandardStringCount
|
||||||
|
|
||||||
|
|
||||||
|
class CFFFont:
	
	"""One font from a CFF font set.
	
	Top DICT entries become instance attributes (via fromDict); entries
	missing from the font fall back to psCharStrings.topDictDefaults
	through __getattr__.
	"""
	
	defaults = psCharStrings.topDictDefaults
	
	def __init__(self):
		pass
	
	def __getattr__(self, attr):
		# fall back to the Top DICT default values
		if not self.defaults.has_key(attr):
			raise AttributeError, attr
		return self.defaults[attr]
	
	def fromDict(self, dict):
		"""Install a parsed Top DICT as instance attributes."""
		self.__dict__.update(dict)
	
	def decompile(self, data, topDictData, strings, fontSet):
		"""Parse this font from the full CFF 'data'; 'topDictData' is the
		raw Top DICT for this font, 'strings' an IndexedStrings instance.
		
		NOTE(review): the bare 'xxx'/'XXX' names below mark unimplemented
		cases (standard charset, charset formats 0 and 3, custom
		Encoding) and raise NameError when reached.
		"""
		top = psCharStrings.TopDictDecompiler(strings)
		top.decompile(topDictData)
		self.fromDict(top.getDict())
		
		# get private dict; at this point self.Private is the raw
		# (size, offset) pair from the Top DICT
		size, offset = self.Private
		#print "YYY Private (size, offset):", size, offset
		privateData = data[offset:offset+size]
		self.Private = PrivateDict()
		self.Private.decompile(data[offset:], privateData, strings)
		
		# get raw charstrings
		#print "YYYY CharStrings offset:", self.CharStrings
		rawCharStrings, restdata = readINDEX(data[self.CharStrings:])
		nGlyphs = len(rawCharStrings)
		
		# get charset (or rather: get glyphNames)
		charsetOffset = self.charset
		if charsetOffset == 0:
			xxx  # standard charset
		else:
			#print "YYYYY charsetOffset:", charsetOffset
			format = ord(data[charsetOffset])
			if format == 0:
				xxx
			elif format == 1:
				charSet = parseCharsetFormat1(nGlyphs,
						data[charsetOffset+1:], strings)
			elif format == 2:
				charSet = parseCharsetFormat2(nGlyphs,
						data[charsetOffset+1:], strings)
			elif format == 3:
				xxx
			else:
				xxx
		self.charset = charSet
		
		assert len(charSet) == nGlyphs
		# build the glyphname -> CharString mapping
		self.CharStrings = charStrings = {}
		if self.CharstringType == 2:
			# Type 2 CharStrings
			charStringClass = psCharStrings.T2CharString
		else:
			# Type 1 CharStrings
			charStringClass = psCharStrings.T1CharString
		for i in range(nGlyphs):
			charStrings[charSet[i]] = charStringClass(rawCharStrings[i])
		assert len(charStrings) == nGlyphs
		
		# XXX Encoding!
		encoding = self.Encoding
		if encoding not in (0, 1):
			# encoding is an _offset_ from the beginning of 'data' to an encoding subtable
			XXX
		self.Encoding = encoding
	
	def getGlyphOrder(self):
		"""Return the glyph order (the charset, a list of glyph names)."""
		return self.charset
	
	def setGlyphOrder(self, glyphOrder):
		"""Set the glyph order (replaces the charset)."""
		self.charset = glyphOrder
	
	def decompileAllCharStrings(self):
		"""Force decompilation of every charstring in the font."""
		if self.CharstringType == 2:
			# Type 2 CharStrings
			decompiler = psCharStrings.SimpleT2Decompiler(self.Private.Subrs, self.GlobalSubrs)
			for charString in self.CharStrings.values():
				if charString.needsDecompilation():
					decompiler.reset()
					decompiler.execute(charString)
		else:
			# Type 1 CharStrings
			for charString in self.CharStrings.values():
				charString.decompile()
	
	def toXML(self, xmlWriter, progress=None):
		"""Dump the whole font (attributes, private dict, charstrings) as XML."""
		xmlWriter.newline()
		# first dump the simple values
		self.toXMLSimpleValues(xmlWriter)
		
		# dump charset
		# XXX
		
		# decompile all charstrings
		if progress:
			progress.setlabel("Decompiling CharStrings...")
		self.decompileAllCharStrings()
		
		# dump private dict
		xmlWriter.begintag("Private")
		xmlWriter.newline()
		self.Private.toXML(xmlWriter)
		xmlWriter.endtag("Private")
		xmlWriter.newline()
		
		self.toXMLCharStrings(xmlWriter, progress)
	
	def toXMLSimpleValues(self, xmlWriter):
		"""Dump all plain (non-structured) instance values as XML tags."""
		keys = self.__dict__.keys()
		# these are dumped separately, by other methods
		keys.remove("CharStrings")
		keys.remove("Private")
		keys.remove("charset")
		keys.remove("GlobalSubrs")
		keys.sort()
		for key in keys:
			value = getattr(self, key)
			if key == "Encoding":
				if value == 0:
					# encoding is (Adobe) Standard Encoding
					value = "StandardEncoding"
				elif value == 1:
					# encoding is Expert Encoding
					value = "ExpertEncoding"
			if type(value) == types.ListType:
				value = string.join(map(str, value), " ")
			else:
				value = str(value)
			xmlWriter.begintag(key)
			if hasattr(value, "toXML"):
				# NOTE(review): 'value' has just been converted to a
				# string above, so this branch looks unreachable -- verify
				xmlWriter.newline()
				value.toXML(xmlWriter)
				xmlWriter.newline()
			else:
				xmlWriter.write(value)
			xmlWriter.endtag(key)
			xmlWriter.newline()
		xmlWriter.newline()
	
	def toXMLCharStrings(self, xmlWriter, progress=None):
		"""Dump all charstrings as XML, sorted by glyph name."""
		charStrings = self.CharStrings
		xmlWriter.newline()
		xmlWriter.begintag("CharStrings")
		xmlWriter.newline()
		glyphNames = charStrings.keys()
		glyphNames.sort()
		for glyphName in glyphNames:
			if progress:
				progress.setlabel("Dumping 'CFF ' table... (%s)" % glyphName)
				progress.increment()
			xmlWriter.newline()
			charString = charStrings[glyphName]
			xmlWriter.begintag("CharString", name=glyphName)
			xmlWriter.newline()
			charString.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.endtag("CharStrings")
		xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class PrivateDict:
	
	"""The Private DICT of a CFF font.
	
	Entries become instance attributes (via fromDict); entries missing
	from the font fall back to psCharStrings.privateDictDefaults through
	__getattr__.
	"""
	
	defaults = psCharStrings.privateDictDefaults
	
	def __init__(self):
		pass
	
	def decompile(self, data, privateData, strings):
		"""Parse 'privateData' (the raw Private DICT). 'data' starts at
		the Private DICT offset and is used to locate the local Subrs
		INDEX, whose offset is relative to the Private DICT."""
		p = psCharStrings.PrivateDictDecompiler(strings)
		p.decompile(privateData)
		self.fromDict(p.getDict())
		
		# get local subrs; at this point self.Subrs is still the raw
		# offset from the parsed DICT, afterwards it is the subr list
		#print "YYY Private.Subrs:", self.Subrs
		chunk = data[self.Subrs:]
		localSubrs, restdata = readINDEX(chunk)
		self.Subrs = map(psCharStrings.T2CharString, localSubrs)
	
	def toXML(self, xmlWriter):
		"""Dump all entries, then the local subroutines, as XML."""
		xmlWriter.newline()
		keys = self.__dict__.keys()
		keys.remove("Subrs")	# dumped separately below
		for key in keys:
			value = getattr(self, key)
			if type(value) == types.ListType:
				value = string.join(map(str, value), " ")
			else:
				value = str(value)
			xmlWriter.begintag(key)
			xmlWriter.write(value)
			xmlWriter.endtag(key)
			xmlWriter.newline()
		# write subroutines
		xmlWriter.newline()
		xmlWriter.begintag("Subrs")
		xmlWriter.newline()
		for i in range(len(self.Subrs)):
			xmlWriter.newline()
			xmlWriter.begintag("CharString", id=i)
			xmlWriter.newline()
			self.Subrs[i].toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.endtag("Subrs")
		xmlWriter.newline()
		xmlWriter.newline()
	
	def __getattr__(self, attr):
		# fall back to the Private DICT default values
		if not self.defaults.has_key(attr):
			raise AttributeError, attr
		return self.defaults[attr]
	
	def fromDict(self, dict):
		"""Install a parsed Private DICT as instance attributes."""
		self.__dict__.update(dict)
|
||||||
|
|
||||||
|
|
||||||
|
def readINDEX(data):
	"""Read a CFF INDEX structure from the head of 'data'.
	
	Returns an (items, restdata) pair: 'items' is a list of raw item
	byte strings, 'restdata' is whatever follows the INDEX.
	
	Layout (CFF spec, "INDEX Data"): a 16-bit big-endian item count,
	a 1-byte offset size, count+1 offsets of that size, then the item
	data; offsets are 1-based relative to the byte preceding the data.
	"""
	count, = struct.unpack(">H", data[:2])
	count = int(count)
	if count == 0:
		# BUGFIX: an empty INDEX consists of the 2-byte zero count only --
		# there is no offSize byte and no offset array (CFF spec).  The
		# previous code fell through and crashed on an unbound loop
		# variable; empty INDEXes (e.g. no global subrs) are common.
		return [], data[2:]
	offSize = ord(data[2])
	data = data[3:]
	offsets = []
	for index in range(count+1):
		# offsets are big-endian, offSize bytes each; left-pad with
		# zero bytes so each can be unpacked as a 4-byte unsigned long
		chunk = data[index * offSize: (index+1) * offSize]
		chunk = '\0' * (4 - offSize) + chunk
		offset, = struct.unpack(">L", chunk)
		offset = int(offset)
		offsets.append(offset)
	data = data[(count+1) * offSize:]
	prev = offsets[0]
	stuff = []
	for next in offsets[1:]:
		# offsets are 1-based: item i spans [offsets[i]-1, offsets[i+1]-1)
		chunk = data[prev-1:next-1]
		assert len(chunk) == next - prev
		stuff.append(chunk)
		prev = next
	data = data[next-1:]
	return stuff, data
|
||||||
|
|
||||||
|
|
||||||
|
def parseCharsetFormat1(nGlyphs, data, strings):
	"""Parse a format-1 CFF charset into a list of glyph names.
	
	'data' starts right after the format byte; 'strings' maps SIDs to
	names.  Format 1 is a sequence of (first-SID: 16 bits, nLeft: 8 bits)
	ranges; glyph 0 is always '.notdef' and is not stored in the ranges.
	"""
	charSet = ['.notdef']
	count = 1
	while count < nGlyphs:
		first = int(struct.unpack(">H", data[:2])[0])
		# read the nLeft byte with struct, consistent with the 16-bit
		# read above and with parseCharsetFormat2 (the previous
		# ord(data[2]) only worked on byte *strings*)
		nLeft = int(struct.unpack(">B", data[2:3])[0])
		data = data[3:]
		# the range covers nLeft+1 consecutive SIDs starting at 'first'
		for SID in range(first, first+nLeft+1):
			charSet.append(strings[SID])
		count = count + nLeft + 1
	return charSet
|
||||||
|
|
||||||
|
|
||||||
|
def parseCharsetFormat2(nGlyphs, data, strings):
	"""Parse a format-2 CFF charset into a list of glyph names.
	
	'data' starts right after the format byte; 'strings' maps SIDs to
	names.  Format 2 is a sequence of (first-SID: 16 bits, nLeft: 16
	bits) ranges; glyph 0 is always '.notdef' and is not stored.
	"""
	glyphNames = ['.notdef']
	seen = 1
	while seen < nGlyphs:
		# each range record is two big-endian 16-bit values
		first, nLeft = struct.unpack(">HH", data[:4])
		first = int(first)
		nLeft = int(nLeft)
		data = data[4:]
		# the range covers nLeft+1 consecutive SIDs starting at 'first'
		sid = first
		while sid <= first + nLeft:
			glyphNames.append(strings[sid])
			sid = sid + 1
		seen = seen + nLeft + 1
	return glyphNames
|
||||||
|
|
||||||
|
|
||||||
|
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technote #5176, version 1.0, 18 March 1998
# A string's index in this list is its SID; font-specific strings get
# SIDs starting at cffStandardStringCount (see IndexedStrings).

cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
		'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
		'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
		'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
		'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
		'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
		'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
		'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
		'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
		's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
		'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
		'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
		'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
		'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
		'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
		'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
		'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
		'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
		'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
		'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
		'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
		'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
		'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
		'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
		'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
		'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
		'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
		'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
		'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
		'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
		'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
		'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
		'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
		'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
		'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
		'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
		'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
		'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
		'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
		'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
		'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
		'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
		'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
		'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
		'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
		'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
		'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
		'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
		'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
		'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
		'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
		'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
		'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
		'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
		'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
		'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
		'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
		'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
		'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
		'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
		'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
		'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
		'Semibold'
]

# the first SID available for font-specific strings
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping: standard string -> SID
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
	cffStandardStringMapping[cffStandardStrings[_i]] = _i
||||||
|
|
553
Lib/fontTools/fondLib.py
Normal file
553
Lib/fontTools/fondLib.py
Normal file
@ -0,0 +1,553 @@
|
|||||||
|
import os
|
||||||
|
import Res
|
||||||
|
import struct, sstruct
|
||||||
|
import string
|
||||||
|
|
||||||
|
__version__ = "1.0b2"
__author__ = "jvr"

# old-style string exception identifier used throughout this module
error = "fondLib.error"

# set to 1 to keep the raw parsed chunks around on FontFamily instances
DEBUG = 0

# sstruct format for the fixed part of a FOND resource header;
# field names become attributes on the FontFamily instance
# (offsets are relative to the start of the resource data)
headerformat = """
	ffFlags: h
	ffFamID: h
	ffFirstChar: h
	ffLastChar: h
	ffAscent: h
	ffDescent: h
	ffLeading: h
	ffWidMax: h
	ffWTabOff: l
	ffKernOff: l
	ffStylOff: l
"""

# total size in bytes of the fixed FOND header; used when computing
# the table offsets written back by FontFamily.save()
FONDheadersize = 52
|
||||||
|
|
||||||
|
class FontFamily:
	"""Wrapper around a Mac 'FOND' (font family) resource.

	Parses the FOND header and its subtables (font association table,
	offset/bounding-box tables, glyph width tables, style mapping table,
	glyph encoding subtable, kerning tables) and can rebuild and save
	the binary resource data.  mode is 'r' or 'w'.
	"""
	
	def __init__(self, theRes, mode = 'r'):
		# theRes: an open Res resource object; must be of type 'FOND'
		self.ID, type, self.name = theRes.GetResInfo()
		if type <> 'FOND':
			raise ValueError, "FOND resource required"
		self.FOND = theRes
		self.mode = mode
		self.changed = 0
		
		if DEBUG:
			# list of (start, end, name) spans of regions we parsed
			self.parsedthings = []
	
	def parse(self):
		# Parse all known subtables.  Order matters: later steps use
		# offsets and state read by earlier ones.
		self._getheader()
		self._getfontassociationtable()
		self._getoffsettable()
		self._getboundingboxtable()
		self._getglyphwidthtable()
		self._getstylemappingtable()
		self._getglyphencodingsubtable()
		self._getkerningtables()
	
	def minimalparse(self):
		# Parse just enough for metrics/naming: header, widths, style map.
		self._getheader()
		self._getglyphwidthtable()
		self._getstylemappingtable()
	
	def __repr__(self):
		return "<FontFamily instance of %s>" % self.name
	
	def getflags(self):
		# the fondClass field holds the family flags
		return self.fondClass
	
	def setflags(self, flags):
		self.changed = 1
		self.fondClass = flags
	
	def save(self, destresfile = None):
		"""Rebuild all subtables and write the resource.

		With destresfile (a resource file ref num) the resource is
		cloned into that file; otherwise it is rewritten in place.
		Raises error if the family was not opened with mode 'w'.
		"""
		if self.mode <> 'w':
			raise error, "can't save font: no write permission"
		self._buildfontassociationtable()
		self._buildoffsettable()
		self._buildboundingboxtable()
		self._buildglyphwidthtable()
		self._buildkerningtables()
		self._buildstylemappingtable()
		self._buildglyphencodingsubtable()
		rawnames = [	"_rawheader",
				"_rawfontassociationtable",
				"_rawoffsettable",
				"_rawglyphwidthtable",
				"_rawstylemappingtable",
				"_rawglyphencodingsubtable",
				"_rawkerningtables"
			]
		for name in rawnames[1:]:	# skip header
			# pad each table to an even byte boundary
			data = getattr(self, name)
			if len(data) & 1:
				setattr(self, name, data + '\0')
		
		# recompute the table offsets stored in the header
		self.ffWTabOff = FONDheadersize + len(self._rawfontassociationtable) + len(self._rawoffsettable)
		self.ffStylOff = self.ffWTabOff + len(self._rawglyphwidthtable)
		self.ffKernOff = self.ffStylOff + len(self._rawstylemappingtable) + len(self._rawglyphencodingsubtable)
		self.glyphTableOffset = len(self._rawstylemappingtable)
		
		# an absent table is flagged by a zero offset
		if not self._rawglyphwidthtable:
			self.ffWTabOff = 0
		if not self._rawstylemappingtable:
			self.ffStylOff = 0
		if not self._rawglyphencodingsubtable:
			self.glyphTableOffset = 0
		if not self._rawkerningtables:
			self.ffKernOff = 0
		
		self._buildheader()
		
		# glyphTableOffset has only just been calculated
		self._updatestylemappingtable()
		
		newdata = ""
		for name in rawnames:
			newdata = newdata + getattr(self, name)
		if destresfile is None:
			# overwrite the existing resource in place
			self.FOND.data = newdata
			self.FOND.ChangedResource()
			self.FOND.WriteResource()
		else:
			# clone the resource into another (open) resource file
			ID, type, name = self.FOND.GetResInfo()
			self.FOND.DetachResource()
			self.FOND.data = newdata
			saveref = Res.CurResFile()
			Res.UseResFile(destresfile)
			self.FOND.AddResource(type, ID, name)
			Res.UseResFile(saveref)
		self.changed = 0
	
	def _getheader(self):
		# Unpack the fixed header: sstruct fields, then the property,
		# international and version words.
		data = self.FOND.data
		sstruct.unpack(headerformat, data[:28], self)
		self.ffProperty = struct.unpack("9h", data[28:46])
		self.ffIntl = struct.unpack("hh", data[46:50])
		self.ffVersion, = struct.unpack("h", data[50:FONDheadersize])
		
		if DEBUG:
			self._rawheader = data[:FONDheadersize]
			self.parsedthings.append((0, FONDheadersize, 'header'))
	
	def _buildheader(self):
		# Inverse of _getheader(); in DEBUG mode compare against the
		# raw bytes we parsed.
		header = sstruct.pack(headerformat, self)
		header = header + apply(struct.pack, ("9h",) + self.ffProperty)
		header = header + apply(struct.pack, ("hh",) + self.ffIntl)
		header = header + struct.pack("h", self.ffVersion)
		if DEBUG:
			print "header is the same?", self._rawheader == header and 'yes.' or 'no.'
			if self._rawheader <> header:
				print len(self._rawheader), len(header)
		self._rawheader = header
	
	def _getfontassociationtable(self):
		# Table of (pointSize, style, resourceID) triplets directly
		# after the header; the stored count is entries - 1.
		data = self.FOND.data
		offset = FONDheadersize
		numberofentries, = struct.unpack("h", data[offset:offset+2])
		numberofentries = numberofentries + 1
		size = numberofentries * 6
		self.fontAssoc = []
		for i in range(offset + 2, offset + size, 6):
			self.fontAssoc.append(struct.unpack("3h", data[i:i+6]))
		
		self._endoffontassociationtable = offset + size + 2
		if DEBUG:
			self._rawfontassociationtable = data[offset:self._endoffontassociationtable]
			self.parsedthings.append((offset, self._endoffontassociationtable, 'fontassociationtable'))
	
	def _buildfontassociationtable(self):
		data = struct.pack("h", len(self.fontAssoc) - 1)
		for size, stype, ID in self.fontAssoc:
			data = data + struct.pack("3h", size, stype, ID)
		
		if DEBUG:
			print "font association table is the same?", self._rawfontassociationtable == data and 'yes.' or 'no.'
			if self._rawfontassociationtable <> data:
				print len(self._rawfontassociationtable), len(data)
		self._rawfontassociationtable = data
	
	def _getoffsettable(self):
		# Keep the bytes between the font association table and the
		# width table verbatim; we only understand the bounding-box
		# part of it (see _getboundingboxtable).
		if self.ffWTabOff == 0:
			self._rawoffsettable = ""
			return
		data = self.FOND.data
		# Quick'n'Dirty. What's the spec anyway? Can't find it...
		offset = self._endoffontassociationtable
		count = self.ffWTabOff
		self._rawoffsettable = data[offset:count]
		if DEBUG:
			self.parsedthings.append((offset, count, 'offsettable&bbtable'))
	
	def _buildoffsettable(self):
		# The raw offset table is carried through unchanged; just make
		# sure the attribute exists.
		if not hasattr(self, "_rawoffsettable"):
			self._rawoffsettable = ""
	
	def _getboundingboxtable(self):
		# Per-style bounding boxes, keyed by style code; only parsed
		# when the raw offset table starts with the magic prefix.
		self.boundingBoxes = None
		if self._rawoffsettable[:6] <> '\0\0\0\0\0\6':  # XXX ????
			return
		boxes = {}
		data = self._rawoffsettable[6:]
		numstyles = struct.unpack("h", data[:2])[0] + 1
		data = data[2:]
		for i in range(numstyles):
			style, l, b, r, t = struct.unpack("hhhhh", data[:10])
			boxes[style] = (l, b, r, t)
			data = data[10:]
		self.boundingBoxes = boxes
	
	def _buildboundingboxtable(self):
		# Only rebuild when we recognized the table during parsing.
		if self.boundingBoxes and self._rawoffsettable[:6] == '\0\0\0\0\0\6':
			boxes = self.boundingBoxes.items()
			boxes.sort()
			data = '\0\0\0\0\0\6' + struct.pack("h", len(boxes) - 1)
			for style, (l, b, r, t) in boxes:
				data = data + struct.pack("hhhhh", style, l, b, r, t)
			self._rawoffsettable = data
	
	def _getglyphwidthtable(self):
		# One width table per style code; each table has an entry for
		# every char from ffFirstChar to ffLastChar plus 3 extra slots.
		self.widthTables = {}
		if self.ffWTabOff == 0:
			return
		data = self.FOND.data
		offset = self.ffWTabOff
		numberofentries, = struct.unpack("h", data[offset:offset+2])
		numberofentries = numberofentries + 1
		count = offset + 2
		for i in range(numberofentries):
			stylecode, = struct.unpack("h", data[count:count+2])
			widthtable = self.widthTables[stylecode] = []
			count = count + 2
			for j in range(3 + self.ffLastChar - self.ffFirstChar):
				width, = struct.unpack("h", data[count:count+2])
				widthtable.append(width)
				count = count + 2
		
		if DEBUG:
			self._rawglyphwidthtable = data[offset:count]
			self.parsedthings.append((offset, count, 'glyphwidthtable'))
	
	def _buildglyphwidthtable(self):
		if not self.widthTables:
			self._rawglyphwidthtable = ""
			return
		numberofentries = len(self.widthTables)
		data = struct.pack('h', numberofentries - 1)
		tables = self.widthTables.items()
		tables.sort()
		for stylecode, table in tables:
			data = data + struct.pack('h', stylecode)
			if len(table) <> (3 + self.ffLastChar - self.ffFirstChar):
				raise error, "width table has wrong length"
			for width in table:
				data = data + struct.pack('h', width)
		if DEBUG:
			print "glyph width table is the same?", self._rawglyphwidthtable == data and 'yes.' or 'no.'
		self._rawglyphwidthtable = data
	
	def _getkerningtables(self):
		# One kerning table per style code; each entry is
		# (firstchar, secondchar, kerndistance) with chars as ints.
		self.kernTables = {}
		if self.ffKernOff == 0:
			return
		data = self.FOND.data
		offset = self.ffKernOff
		numberofentries, = struct.unpack("h", data[offset:offset+2])
		numberofentries = numberofentries + 1
		count = offset + 2
		for i in range(numberofentries):
			stylecode, = struct.unpack("h", data[count:count+2])
			count = count + 2
			numberofpairs, = struct.unpack("h", data[count:count+2])
			count = count + 2
			kerntable = self.kernTables[stylecode] = []
			for j in range(numberofpairs):
				firstchar, secondchar, kerndistance = struct.unpack("cch", data[count:count+4])
				kerntable.append((ord(firstchar), ord(secondchar), kerndistance))
				count = count + 4
		
		if DEBUG:
			self._rawkerningtables = data[offset:count]
			self.parsedthings.append((offset, count, 'kerningtables'))
	
	def _buildkerningtables(self):
		if self.kernTables == {}:
			self._rawkerningtables = ""
			self.ffKernOff = 0
			return
		numberofentries = len(self.kernTables)
		data = [struct.pack('h', numberofentries - 1)]
		tables = self.kernTables.items()
		tables.sort()
		for stylecode, table in tables:
			data.append(struct.pack('h', stylecode))
			data.append(struct.pack('h', len(table)))  # numberofpairs
			for firstchar, secondchar, kerndistance in table:
				data.append(struct.pack("cch", chr(firstchar), chr(secondchar), kerndistance))
		
		data = string.join(data, '')
		
		if DEBUG:
			print "kerning table is the same?", self._rawkerningtables == data and 'yes.' or 'no.'
			if self._rawkerningtables <> data:
				print len(self._rawkerningtables), len(data)
		self._rawkerningtables = data
	
	def _getstylemappingtable(self):
		# Style mapping table: class flags, glyph table offset, style
		# index array and the Pascal-style style name strings.
		offset = self.ffStylOff
		self.styleStrings = []
		self.styleIndices = ()
		self.glyphTableOffset = 0
		self.fondClass = 0
		if offset == 0:
			return
		data = self.FOND.data
		self.fondClass, self.glyphTableOffset, self.styleMappingReserved, = \
				struct.unpack("hll", data[offset:offset+10])
		self.styleIndices = struct.unpack('48b', data[offset + 10:offset + 58])
		stringcount, = struct.unpack('h', data[offset+58:offset+60])
		
		count = offset + 60
		for i in range(stringcount):
			# each style string is a length byte followed by the chars
			str_len = ord(data[count])
			self.styleStrings.append(data[count + 1:count + 1 + str_len])
			count = count + 1 + str_len
		
		self._unpackstylestrings()
		
		data = data[offset:count]
		if len(data) % 2:
			data = data + '\0'
		if DEBUG:
			self._rawstylemappingtable = data
			self.parsedthings.append((offset, count, 'stylemappingtable'))
	
	def _buildstylemappingtable(self):
		if not self.styleIndices:
			self._rawstylemappingtable = ""
			return
		data = struct.pack("hll", self.fondClass, self.glyphTableOffset,
				self.styleMappingReserved)
		
		self._packstylestrings()
		data = data + apply(struct.pack, ("48b",) + self.styleIndices)
		
		stringcount = len(self.styleStrings)
		data = data + struct.pack("h", stringcount)
		for string in self.styleStrings:
			data = data + chr(len(string)) + string
		
		if len(data) % 2:
			data = data + '\0'
		
		if DEBUG:
			print "style mapping table is the same?", self._rawstylemappingtable == data and 'yes.' or 'no.'
		self._rawstylemappingtable = data
	
	def _unpackstylestrings(self):
		# Reconstruct the full PostScript name for each style that has
		# a width table: family name plus the indexed name parts.
		psNames = {}
		self.ffFamilyName = self.styleStrings[0]
		for i in self.widthTables.keys():
			index = self.styleIndices[i]
			if index == 1:
				# index 1 means "just the family name"
				psNames[i] = self.styleStrings[0]
			else:
				style = self.styleStrings[0]
				codes = map(ord, self.styleStrings[index - 1])
				for code in codes:
					style = style + self.styleStrings[code - 1]
				psNames[i] = style
		self.psNames = psNames
	
	def _packstylestrings(self):
		# Inverse of _unpackstylestrings(): split each PS name into
		# parts and rebuild the shared string list plus index strings.
		nameparts = {}
		splitnames = {}
		for style, name in self.psNames.items():
			split = splitname(name, self.ffFamilyName)
			splitnames[style] = split
			for part in split:
				nameparts[part] = None
		del nameparts[self.ffFamilyName]
		nameparts = nameparts.keys()
		nameparts.sort()
		items = splitnames.items()
		items.sort()
		numindices = 0
		for style, split in items:
			if len(split) > 1:
				numindices = numindices + 1
		styleStrings = [self.ffFamilyName] + numindices * [None] + nameparts
		# XXX the next bit goes wrong for MM fonts.
		for style, split in items:
			if len(split) == 1:
				continue
			indices = ""
			for part in split[1:]:
				indices = indices + chr(nameparts.index(part) + numindices + 2)
			styleStrings[self.styleIndices[style] - 1] = indices
		self.styleStrings = styleStrings
	
	def _updatestylemappingtable(self):
		# Update the glyphTableOffset field.
		# This is neccesary since we have to build this table to
		# know what the glyphTableOffset will be.
		# And we don't want to build it twice, do we?
		data = self._rawstylemappingtable
		if not data:
			return
		data = data[:2] + struct.pack("l", self.glyphTableOffset) + data[6:]
		self._rawstylemappingtable = data
	
	def _getglyphencodingsubtable(self):
		# Maps glyph codes to glyph names (Pascal strings), located
		# glyphTableOffset bytes into the style mapping table.
		glyphEncoding = self.glyphEncoding = {}
		if not self.glyphTableOffset:
			return
		offset = self.ffStylOff + self.glyphTableOffset
		data = self.FOND.data
		numberofentries, = struct.unpack("h", data[offset:offset+2])
		count = offset + 2
		for i in range(numberofentries):
			glyphcode = ord(data[count])
			count = count + 1
			strlen = ord(data[count])
			count = count + 1
			glyphname = data[count:count+strlen]
			glyphEncoding[glyphcode] = glyphname
			count = count + strlen
		
		if DEBUG:
			self._rawglyphencodingsubtable = data[offset:count]
			self.parsedthings.append((offset, count, 'glyphencodingsubtable'))
	
	def _buildglyphencodingsubtable(self):
		if not self.glyphEncoding:
			self._rawglyphencodingsubtable = ""
			return
		numberofentries = len(self.glyphEncoding)
		data = struct.pack("h", numberofentries)
		items = self.glyphEncoding.items()
		items.sort()
		for glyphcode, glyphname in items:
			data = data + chr(glyphcode) + chr(len(glyphname)) + glyphname
		self._rawglyphencodingsubtable = data
|
||||||
|
|
||||||
|
|
||||||
|
# used by splitname() to detect word boundaries in font names
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
||||||
|
|
||||||
|
def splitname(name, famname = None):
	"""Split a font name into parts, breaking before each hyphen or
	uppercase letter.  If famname is given, name must start with it and
	famname becomes the first part; otherwise raises error.
	"""
	# XXX this goofs up MM font names: but how should it be done??
	if famname:
		if name[:len(famname)] <> famname:
			raise error, "first part of name should be same as family name"
		name = name[len(famname):]
		split = [famname]
	else:
		split = []
	current = ""
	for c in name:
		if c == '-' or c in uppercase:
			# boundary: flush the part collected so far
			if current:
				split.append(current)
				current = ""
		current = current + c
	if current:
		split.append(current)
	return split
|
||||||
|
|
||||||
|
def makeLWFNfilename(name):
	"""Derive the LWFN file name for a PostScript font name: the first
	five characters of the family part followed by the first three
	characters of every other part (hyphens are dropped).
	"""
	parts = splitname(name)
	filename = parts[0][:5]
	for chunk in parts[1:]:
		if chunk != '-':
			filename = filename + chunk[:3]
	return filename
|
||||||
|
|
||||||
|
class BitmapFontFile:
	"""A Mac suitcase file: opens the resource fork and wraps every
	'FOND' resource in it as a FontFamily instance (in self.fonds).
	"""
	
	def __init__(self, path, mode = 'r'):
		import macfs
		
		# map mode to a resource-fork open permission
		if mode == 'r':
			permission = 1	# read only
		elif mode == 'w':
			permission = 3	# exclusive r/w
		else:
			raise error, 'mode should be either "r" or "w"'
		self.mode = mode
		fss = macfs.FSSpec(path)
		self.resref = Res.FSpOpenResFile(fss, permission)
		Res.UseResFile(self.resref)
		self.path = path
		self.fonds = []
		self.getFONDs()
	
	def getFONDs(self):
		# wrap each FOND resource in the (current) resource file
		FONDcount = Res.Count1Resources('FOND')
		for i in range(FONDcount):
			fond = FontFamily(Res.Get1IndResource('FOND', i + 1), self.mode)
			self.fonds.append(fond)
	
	def parse(self):
		# Fully parse every FOND; index them by their first (sorted)
		# PostScript name in self.fondsbyname.
		self.fondsbyname = {}
		for fond in self.fonds:
			fond.parse()
			if hasattr(fond, "psNames") and fond.psNames:
				psNames = fond.psNames.values()
				psNames.sort()
				self.fondsbyname[psNames[0]] = fond
	
	def minimalparse(self):
		for fond in self.fonds:
			fond.minimalparse()
	
	def close(self):
		# Close the resource fork; safe to call more than once.
		if self.resref <> None:
			try:
				Res.CloseResFile(self.resref)
			except Res.Error:
				pass
			self.resref = None
|
||||||
|
|
||||||
|
|
||||||
|
class FondSelector:
	"""Modal dialog (W toolkit) that lets the user pick one FOND from a
	list; the chosen index ends up in self.choice.
	"""
	
	def __init__(self, fondlist):
		import W
		if not fondlist:
			raise ValueError, "expected at least one FOND entry"
		if len(fondlist) == 1:
			# nothing to choose: skip the dialog entirely
			self.choice = 0
			return
		fonds = []
		for fond in fondlist:
			fonds.append(fond.name)
		self.w = W.ModalDialog((200, 200), "aaa")
		self.w.donebutton = W.Button((-70, -26, 60, 16), "Done", self.close)
		self.w.l = W.List((10, 10, -10, -36), fonds, self.listhit)
		self.w.setdefaultbutton(self.w.donebutton)
		self.w.l.setselection([0])
		self.w.open()
	
	def close(self):
		# record the current selection and dismiss the dialog
		self.checksel()
		sel = self.w.l.getselection()
		self.choice = sel[0]
		self.w.close()
	
	def listhit(self, isDbl):
		# double-click acts like pressing the Done button
		if isDbl:
			self.w.donebutton.push()
		else:
			self.checksel()
	
	def checksel(self):
		# keep the selection non-empty and single
		sel = self.w.l.getselection()
		if not sel:
			self.w.l.setselection([0])
		elif len(sel) <> 1:
			self.w.l.setselection([sel[0]])
|
||||||
|
|
0
Lib/fontTools/misc/__init__.py
Normal file
0
Lib/fontTools/misc/__init__.py
Normal file
89
Lib/fontTools/misc/textTools.py
Normal file
89
Lib/fontTools/misc/textTools.py
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
"""fontTools.misc.textTools.py -- miscelaneous routines."""
|
||||||
|
|
||||||
|
|
||||||
|
import string
|
||||||
|
|
||||||
|
|
||||||
|
def safeEval(data, eval=eval):
	"""A safe replacement for eval: evaluates data with no builtins."""
	noBuiltins = {"__builtins__": {}}
	return eval(data, noBuiltins, {})
|
||||||
|
|
||||||
|
|
||||||
|
def readHex(content):
	"""Convert a list of hex strings to binary data."""
	# collect only the string items; anything else is ignored
	textChunks = []
	for chunk in content:
		if type(chunk) == type(""):
			textChunks.append(chunk)
	return deHexStr("".join(textChunks))
|
||||||
|
|
||||||
|
def deHexStr(hexdata):
	"""Convert a hex string to binary data (an 8-bit character string).

	Whitespace in the input is ignored; an odd number of hex digits is
	padded with a trailing "0".
	"""
	# collapse all whitespace (str methods instead of the deprecated
	# string-module functions; join instead of quadratic concatenation)
	hexdata = "".join(hexdata.split())
	if len(hexdata) % 2:
		hexdata = hexdata + "0"
	chars = []
	for i in range(0, len(hexdata), 2):
		chars.append(chr(int(hexdata[i:i+2], 16)))
	return "".join(chars)
|
||||||
|
|
||||||
|
def hexStr(data):
	"""Convert binary data (an 8-bit character string) to a lowercase
	hex string.
	"""
	# "%02x" produces the same lowercase digits the original picked out
	# of string.hexdigits, without quadratic string concatenation
	return "".join(["%02x" % ord(c) for c in data])
|
||||||
|
|
||||||
|
|
||||||
|
def num2binary(l, bits=32):
	"""Return the two's-complement binary representation of l as a
	string of '0'/'1' characters, one space-separated group per 8 bits.

	Raises AssertionError if l does not fit in the given number of bits.
	NOTE(review): bits that don't complete a group of 8 are dropped, as
	in the original implementation.
	"""
	groups = []
	current = ""
	for i in range(bits):
		if l & 0x1:
			current = "1" + current
		else:
			current = "0" + current
		l = l >> 1
		if not ((i + 1) % 8):
			groups.append(current)
			current = ""
	groups.reverse()
	# after shifting out 'bits' bits only 0 (non-negative) or -1
	# (negative) may remain, otherwise the number didn't fit
	assert l in (0, -1), "number doesn't fit in number of bits"
	# " ".join replaces the removed string.join()
	return " ".join(groups)
|
||||||
|
|
||||||
|
|
||||||
|
def binary2num(bin):
	"""Parse a binary string (as produced by num2binary()) back into an
	integer; embedded whitespace is ignored.
	"""
	# str methods replace the removed string.split/string.join
	bin = "".join(bin.split())
	l = 0
	for digit in bin:
		l = l << 1
		if digit != "0":
			l = l | 0x1
	return l
|
||||||
|
|
||||||
|
|
||||||
|
def caselessSort(alist):
	"""Return a sorted copy of a list. If there are only strings
	in the list, it will not consider case.
	"""
	try:
		# decorate: turn ['FOO', 'aaBc', 'ABcD'] into
		# [('foo', 'FOO'), ('aabc', 'aaBc'), ('abcd', 'ABcD')] —
		# ties on the lowercased key fall back to comparing the
		# originals, keeping the order deterministic.
		# AttributeError is what non-strings actually raise on .lower()
		tupledlist = [(item.lower(), item) for item in alist]
	except (TypeError, AttributeError):
		# at least one element in alist is not a string, proceed the normal way...
		alist = alist[:]
		alist.sort()
		return alist
	else:
		tupledlist.sort()
		# undecorate
		return [item for lowered, item in tupledlist]
|
||||||
|
|
144
Lib/fontTools/nfntLib.py
Normal file
144
Lib/fontTools/nfntLib.py
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
import Res
|
||||||
|
import macfs
|
||||||
|
import struct
|
||||||
|
import Qd
|
||||||
|
from types import *
|
||||||
|
|
||||||
|
|
||||||
|
class NFNT:
	"""A Mac 'NFNT' (bitmap font) resource: parses the FontRec header,
	bitmap image and offset/width/location tables, and can draw and
	measure strings via QuickDraw.
	"""
	
	def __init__(self, nfnt, name = "", _type = 'NFNT'):
		# nfnt may be a Res resource, a path string, or an FSSpec
		if type(nfnt) == type(Res.Resource("")):
			theID, theType, name = nfnt.GetResInfo()
			if theType <> _type:
				raise TypeError, 'resource of wrong type; expected ' + _type
			data = nfnt.data
		elif type(nfnt) == StringType:
			fss = macfs.FSSpec(nfnt)
			data = readnfntresource(nfnt, name, _type)
		elif type(nfnt) == type(macfs.FSSpec(':')):
			data = readnfntresource(nfnt, name, _type)
		else:
			raise TypeError, 'expected resource, string or fss; found ' + type(nfnt).__name__
		self.parse_nfnt(data)
	
	def parse_nfnt(self, data):
		# header; FontRec
		(	self.fontType,
			self.firstChar,
			self.lastChar,
			self.widMax,
			self.kernMax,
			self.nDescent,
			fRectWidth,
			self.fRectHeight,
			owTLoc,
			self.ascent,
			self.descent,
			self.leading,
			self.rowWords	) = struct.unpack("13h", data[:26])
		if owTLoc < 0:
			owTLoc = owTLoc + 0x8000	# unsigned short
		
		# rest
		# one entry per char from firstChar..lastChar plus missing char
		tablesize = 2 * (self.lastChar - self.firstChar + 3)
		bitmapsize = 2 * self.rowWords * self.fRectHeight
		
		self.bits = data[26:26 + bitmapsize]
		self.bitImage = Qd.BitMap(self.bits, 2 * self.rowWords, (0, 0, self.rowWords * 16, self.fRectHeight))
		
		owTable = data[26 + bitmapsize + tablesize:26 + bitmapsize + 2 * tablesize]
		if len(owTable) <> tablesize:
			raise ValueError, 'invalid NFNT resource'
		
		locTable = data[26 + bitmapsize:26 + bitmapsize + tablesize]
		if len(locTable) <> tablesize:
			raise ValueError, 'invalid NFNT resource'
		
		# fill tables
		self.offsettable = []
		self.widthtable = []
		self.locationtable = []
		for i in range(0, tablesize, 2):
			# owTable packs (offset, width) byte pairs
			self.offsettable.append(ord(owTable[i]))
			self.widthtable.append(ord(owTable[i+1]))
			loc, = struct.unpack('h', locTable[i:i+2])
			self.locationtable.append(loc)
	
	def drawstring(self, astring, destbits, xoffset = 0, yoffset = 0):
		# draw each char, advancing by its width; returns the final pen x
		drawchar = self.drawchar
		for ch in astring:
			xoffset = drawchar(ch, destbits, xoffset, yoffset)
		return xoffset
	
	def drawchar(self, ch, destbits, xoffset, yoffset = 0):
		# blit one glyph from the strike bitmap into destbits
		width, bounds, destbounds = self.getcharbounds(ch)
		destbounds = Qd.OffsetRect(destbounds, xoffset, yoffset)
		Qd.CopyBits(self.bitImage, destbits, bounds, destbounds, 1, None)
		return xoffset + width
	
	def stringwidth(self, astring):
		charwidth = self.charwidth
		width = 0
		for ch in astring:
			width = width + charwidth(ch)
		return width
	
	def charwidth(self, ch):
		cindex = ord(ch) - self.firstChar
		# offset == width == 255 marks an undefined character
		if cindex > self.lastChar or \
				(self.offsettable[cindex] == 255 and self.widthtable[cindex] == 255):
			cindex = -2	# missing char
		return self.widthtable[cindex]
	
	def getcharbounds(self, ch):
		cindex = ord(ch) - self.firstChar
		if cindex > self.lastChar or \
				(self.offsettable[cindex] == 255 and self.widthtable[cindex] == 255):
			return self.getcharboundsindex(-2)	# missing char
		return self.getcharboundsindex(cindex)
	
	def getcharboundsindex(self, cindex):
		# Returns (advance width, src rect in the strike, dest rect).
		offset = self.offsettable[cindex]
		width = self.widthtable[cindex]
		if offset == 255 and width == 255:
			raise ValueError, "character not defined"
		# locations are x positions in the strike bitmap
		location0 = self.locationtable[cindex]
		location1 = self.locationtable[cindex + 1]
		srcbounds = (location0, 0, location1, self.fRectHeight)
		destbounds = (	offset + self.kernMax,
				0,
				offset + self.kernMax + location1 - location0,
				self.fRectHeight	)
		return width, srcbounds, destbounds
|
||||||
|
|
||||||
|
|
||||||
|
def readnfntresource(fss, name, _type = 'NFNT'):
	"""Read the data of the named resource of the given type from the
	resource fork of fss; with an empty name, take the first resource of
	that type.  The file is always closed again.
	"""
	resref = Res.FSpOpenResFile(fss, 1)	# readonly
	Res.UseResFile(resref)
	try:
		if name:
			res = Res.Get1NamedResource(_type, name)
		else:
			# just take the first in the file
			res = Res.Get1IndResource(_type, 1)
		theID, theType, name = res.GetResInfo()
		if theType <> _type:
			raise TypeError, 'resource of wrong type; expected ' + _type
		data = res.data
	finally:
		Res.CloseResFile(resref)
	return data
|
||||||
|
|
||||||
|
|
||||||
|
# ad-hoc manual test, disabled: let the user pick a font suitcase and
# draw a sample string into the frontmost window
if 0:
	import Win
	fss, ok = macfs.StandardGetFile('FFIL')
	if ok:
		n = NFNT(fss)
		s = "!!!ABCDEFGHIJKLMN01234 hemeltje lief...x.."
		x = 10
		y = 40
		destbits = Win.FrontWindow().GetWindowPort().portBits
		n.drawstring(s, destbits, x, y)
		print n.stringwidth(s)
|
974
Lib/fontTools/psCharStrings.py
Normal file
974
Lib/fontTools/psCharStrings.py
Normal file
@ -0,0 +1,974 @@
|
|||||||
|
"""psCharStrings.py -- module implementing various kinds of CharStrings:
|
||||||
|
CFF dictionary data and Type1/Type2 CharStrings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__version__ = "1.0b1"
|
||||||
|
__author__ = "jvr"
|
||||||
|
|
||||||
|
|
||||||
|
import types
|
||||||
|
import struct
|
||||||
|
import string
|
||||||
|
|
||||||
|
|
||||||
|
# Type1 charstring operand encoding: maps each lead byte value to the
# name of the ByteCodeDecompilerBase method that decodes it.
t1OperandEncoding = [None] * 256
t1OperandEncoding[0:32] = (32) * ["do_operator"]
t1OperandEncoding[32:247] = (247 - 32) * ["read_byte"]
t1OperandEncoding[247:251] = (251 - 247) * ["read_smallInt1"]
t1OperandEncoding[251:255] = (255 - 251) * ["read_smallInt2"]
t1OperandEncoding[255] = "read_longInt"
assert len(t1OperandEncoding) == 256

# Type2 differs from Type1 only in lead byte 28 (16-bit integer)
t2OperandEncoding = t1OperandEncoding[:]
t2OperandEncoding[28] = "read_shortInt"

# CFF DICTs additionally use 29 (32-bit integer) and 30 (packed BCD
# real number); 255 is reserved there
cffDictOperandEncoding = t2OperandEncoding[:]
cffDictOperandEncoding[29] = "read_longInt"
cffDictOperandEncoding[30] = "read_realNumber"
cffDictOperandEncoding[255] = "reserved"


# nibble meanings for packed BCD real numbers (see read_realNumber);
# nibble 0xd is unused, 0xf terminates
realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
		'.', 'E', 'E-', None, '-']
|
||||||
|
|
||||||
|
|
||||||
|
class ByteCodeDecompilerBase:
	"""Mixin that decodes individual operands from CharString/DICT
	bytecode.

	Each read_* method takes the lead byte b0, the data string and the
	index just past b0, and returns (value, newIndex).
	"""
	
	def read_byte(self, b0, data, index):
		# single-byte integer: lead bytes 32..246 encode -107..107
		return b0 - 139, index
	
	def read_smallInt1(self, b0, data, index):
		# two-byte positive integer: lead bytes 247..250, range 108..1131
		b1 = ord(data[index])
		return (b0-247)*256 + b1 + 108, index+1
	
	def read_smallInt2(self, b0, data, index):
		# two-byte negative integer: lead bytes 251..254, range -1131..-108
		b1 = ord(data[index])
		return -(b0-251)*256 - b1 - 108, index+1
	
	def read_shortInt(self, b0, data, index):
		# 16-bit signed integer, big endian
		bin = data[index] + data[index+1]
		value, = struct.unpack(">h", bin)
		return value, index+2
	
	def read_longInt(self, b0, data, index):
		# 32-bit signed integer, big endian
		bin = data[index] + data[index+1] + data[index+2] + data[index+3]
		value, = struct.unpack(">l", bin)
		return value, index+4
	
	def read_realNumber(self, b0, data, index):
		# packed BCD real: two nibbles per byte, decoded via realNibbles;
		# a 0xf nibble terminates the number
		number = ''
		while 1:
			b = ord(data[index])
			index = index + 1
			nibble0 = (b & 0xf0) >> 4
			nibble1 = b & 0x0f
			if nibble0 == 0xf:
				break
			number = number + realNibbles[nibble0]
			if nibble1 == 0xf:
				break
			number = number + realNibbles[nibble1]
		# float() replaces the deprecated/removed string.atof()
		return float(number), index
|
||||||
|
|
||||||
|
|
||||||
|
def _buildOperatorDict(operatorList):
|
||||||
|
dict = {}
|
||||||
|
for item in operatorList:
|
||||||
|
if len(item) == 2:
|
||||||
|
dict[item[0]] = item[1]
|
||||||
|
else:
|
||||||
|
dict[item[0]] = item[1:]
|
||||||
|
return dict
|
||||||
|
|
||||||
|
|
||||||
|
# Type2 charstring operators: single-byte opcodes, or (12, x) pairs for
# two-byte operators with escape byte 12.  Fed to _buildOperatorDict().
t2Operators = [
#	opcode     name
	(1,        'hstem'),
	(3,        'vstem'),
	(4,        'vmoveto'),
	(5,        'rlineto'),
	(6,        'hlineto'),
	(7,        'vlineto'),
	(8,        'rrcurveto'),
	(10,       'callsubr'),
	(11,       'return'),
	(14,       'endchar'),
	(16,       'blend'),
	(18,       'hstemhm'),
	(19,       'hintmask'),
	(20,       'cntrmask'),
	(21,       'rmoveto'),
	(22,       'hmoveto'),
	(23,       'vstemhm'),
	(24,       'rcurveline'),
	(25,       'rlinecurve'),
	(26,       'vvcurveto'),
	(27,       'hhcurveto'),
#	(28,       'shortint'),  # not really an operator
	(29,       'callgsubr'),
	(30,       'vhcurveto'),
	(31,       'hvcurveto'),
	((12, 3),  'and'),
	((12, 4),  'or'),
	((12, 5),  'not'),
	((12, 8),  'store'),
	((12, 9),  'abs'),
	((12, 10), 'add'),
	((12, 11), 'sub'),
	((12, 12), 'div'),
	((12, 13), 'load'),
	((12, 14), 'neg'),
	((12, 15), 'eq'),
	((12, 18), 'drop'),
	((12, 20), 'put'),
	((12, 21), 'get'),
	((12, 22), 'ifelse'),
	((12, 23), 'random'),
	((12, 24), 'mul'),
	((12, 26), 'sqrt'),
	((12, 27), 'dup'),
	((12, 28), 'exch'),
	((12, 29), 'index'),
	((12, 30), 'roll'),
	((12, 34), 'hflex'),
	((12, 35), 'flex'),
	((12, 36), 'hflex1'),
	((12, 37), 'flex1'),
]
|
||||||
|
|
||||||
|
class T2CharString(ByteCodeDecompilerBase):
    """A Type 2 charstring, held either as raw bytecode or as a decompiled
    "program" (a list of operands and operator-name strings).

    Exactly one of the two representations is active: self.bytecode is a
    string of raw bytes or None; self.program is a token list (empty when
    bytecode is active).
    """

    # operandEncoding maps a lead byte to a handler-method name; see
    # ByteCodeDecompilerBase (defined earlier in this file).
    operandEncoding = t2OperandEncoding
    operators = _buildOperatorDict(t2Operators)

    def __init__(self, bytecode=None, program=None):
        if program is None:
            program = []
        self.bytecode = bytecode
        self.program = program

    def __repr__(self):
        # Report which representation this instance currently holds.
        if self.bytecode is None:
            return "<%s (source) at %x>" % (self.__class__.__name__, id(self))
        else:
            return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))

    def needsDecompilation(self):
        # True while we still hold raw bytecode rather than a token list.
        return self.bytecode is not None

    def setProgram(self, program):
        # Switch to the decompiled representation; the bytecode is dropped.
        self.program = program
        self.bytecode = None

    def getToken(self, index,
            len=len, ord=ord, getattr=getattr, type=type, StringType=types.StringType):
        """Return (token, isOperator, index) for the token at 'index'.

        Works on either representation.  Returns (None, 0, 0) at the end.
        The keyword arguments merely localize builtins for speed (an old
        CPython optimization); callers pass only 'index'.
        """
        if self.bytecode is not None:
            if index >= len(self.bytecode):
                return None, 0, 0
            b0 = ord(self.bytecode[index])
            index = index + 1
            # dispatch on the lead byte: operand readers or do_operator
            code = self.operandEncoding[b0]
            handler = getattr(self, code)
            token, index = handler(b0, self.bytecode, index)
        else:
            if index >= len(self.program):
                return None, 0, 0
            token = self.program[index]
            index = index + 1
        # in a program list, operators are the (string) operator names
        isOperator = type(token) == StringType
        return token, isOperator, index

    def getBytes(self, index, nBytes):
        """Return ('nBytes' raw bytes, new index); used for hintmask data,
        which follows its operator inline rather than being an operand."""
        if self.bytecode is not None:
            newIndex = index + nBytes
            bytes = self.bytecode[index:newIndex]
            index = newIndex
        else:
            # in a program, the mask bytes are stored as one string token
            bytes = self.program[index]
            index = index + 1
        assert len(bytes) == nBytes
        return bytes, index

    def do_operator(self, b0, data, index):
        """Decode an operator: b0 == 12 introduces a two-byte operator."""
        if b0 == 12:
            op = (b0, ord(data[index]))
            index = index+1
        else:
            op = b0
        operator = self.operators[op]
        return operator, index

    def toXML(self, xmlWriter):
        """Write this charstring to 'xmlWriter': as a hex dump when still
        in bytecode form, otherwise one line of source per operator."""
        from misc.textTools import num2binary
        if self.bytecode is not None:
            xmlWriter.dumphex(self.bytecode)
        else:
            index = 0
            args = []
            while 1:
                token, isOperator, index = self.getToken(index)
                if token is None:
                    break
                if isOperator:
                    args = map(str, args)
                    if token in ('hintmask', 'cntrmask'):
                        # the mask bytes follow the operator; render them
                        # as a binary bit string for readability
                        hintMask, isOperator, index = self.getToken(index)
                        bits = []
                        for byte in hintMask:
                            bits.append(num2binary(ord(byte), 8))
                        hintMask = repr(string.join(bits, ""))
                        line = string.join(args + [token, hintMask], " ")
                    else:
                        line = string.join(args + [token], " ")
                    xmlWriter.write(line)
                    xmlWriter.newline()
                    args = []
                else:
                    # operand: buffer it until its operator arrives
                    args.append(token)
|
||||||
|
|
||||||
|
|
||||||
|
# Type 1 charstring operators as (opcode, name) pairs; (12, code) tuples
# are the two-byte escaped operators.  Consumed by _buildOperatorDict()
# for T1CharString.
t1Operators = [
    # opcode     name
    (1, 'hstem'),
    (3, 'vstem'),
    (4, 'vmoveto'),
    (5, 'rlineto'),
    (6, 'hlineto'),
    (7, 'vlineto'),
    (8, 'rrcurveto'),
    (9, 'closepath'),
    (10, 'callsubr'),
    (11, 'return'),
    (13, 'hsbw'),
    (14, 'endchar'),
    (21, 'rmoveto'),
    (22, 'hmoveto'),
    (30, 'vhcurveto'),
    (31, 'hvcurveto'),
    # two-byte (escaped) operators
    ((12, 0), 'dotsection'),
    ((12, 1), 'vstem3'),
    ((12, 2), 'hstem3'),
    ((12, 6), 'seac'),
    ((12, 7), 'sbw'),
    ((12, 12), 'div'),
    ((12, 16), 'callothersubr'),
    ((12, 17), 'pop'),
    ((12, 33), 'setcurrentpoint'),
]
|
||||||
|
|
||||||
|
class T1CharString(T2CharString):
    """A Type 1 charstring: same machinery as T2CharString but with the
    Type 1 operand encoding and operator set."""

    operandEncoding = t1OperandEncoding
    operators = _buildOperatorDict(t1Operators)

    def decompile(self):
        """Decompile the bytecode into a program (token list), in place."""
        # NOTE(review): T2CharString.__init__ always sets self.program, so
        # this hasattr() guard appears to be always true — verify whether
        # callers construct instances some other way, or whether the guard
        # should test self.bytecode instead.
        if hasattr(self, "program"):
            return
        program = []
        index = 0
        while 1:
            token, isOperator, index = self.getToken(index)
            if token is None:
                break
            program.append(token)
        self.setProgram(program)
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleT2Decompiler:
    """Minimal Type 2 charstring interpreter.

    Walks a charstring token by token, executing only the operators needed
    to track hint counts and subroutine calls; all other operators simply
    clear the operand stack.  As a side effect, a charstring that is still
    in bytecode form gets its decompiled program stored back via
    setProgram().
    """

    def __init__(self, localSubrs, globalSubrs):
        self.localSubrs = localSubrs
        self.localBias = calcSubrBias(localSubrs)
        self.globalSubrs = globalSubrs
        self.globalBias = calcSubrBias(globalSubrs)
        self.reset()

    def reset(self):
        # Reset all interpreter state for a fresh charstring.
        self.callingStack = []    # charstrings currently being executed
        self.operandStack = []
        self.hintCount = 0
        self.hintMaskBytes = 0    # 0 until the first hintmask/cntrmask

    def execute(self, charString):
        """Interpret 'charString'; recursion happens via op_callsubr and
        op_callgsubr, which call execute() again."""
        self.callingStack.append(charString)
        needsDecompilation = charString.needsDecompilation()
        if needsDecompilation:
            # record every token so we can store the decompiled program
            program = []
            pushToProgram = program.append
        else:
            pushToProgram = lambda x: None
        pushToStack = self.operandStack.append
        index = 0
        while 1:
            token, isOperator, index = charString.getToken(index)
            if token is None:
                break  # we're done!
            pushToProgram(token)
            if isOperator:
                handlerName = "op_" + token
                if hasattr(self, handlerName):
                    handler = getattr(self, handlerName)
                    rv = handler(index)
                    if rv:
                        # hintmask/cntrmask handlers return the raw mask
                        # bytes plus the index just past them
                        hintMaskBytes, index = rv
                        pushToProgram(hintMaskBytes)
                else:
                    # unhandled operator: just consume its operands
                    self.popall()
            else:
                pushToStack(token)
        if needsDecompilation:
            charString.setProgram(program)
            # a charstring must end in one of these operators
            assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", "seac")
        del self.callingStack[-1]

    def pop(self):
        # Pop and return the top of the operand stack.
        value = self.operandStack[-1]
        del self.operandStack[-1]
        return value

    def popall(self):
        # Return all operands and clear the stack.
        stack = self.operandStack[:]
        self.operandStack[:] = []
        return stack

    def push(self, value):
        self.operandStack.append(value)

    def op_return(self, index):
        # NOTE(review): this is effectively a no-op; the 'pass' branch does
        # nothing whether or not operands remain on the stack.
        if self.operandStack:
            pass

    def op_endchar(self, index):
        pass

    def op_callsubr(self, index):
        # Subr numbers are stored biased; see calcSubrBias().
        subrIndex = self.pop()
        subr = self.localSubrs[subrIndex+self.localBias]
        self.execute(subr)

    def op_callgsubr(self, index):
        subrIndex = self.pop()
        subr = self.globalSubrs[subrIndex+self.globalBias]
        self.execute(subr)

    def op_hstemhm(self, index):
        self.countHints()

    op_vstemhm = op_hstemhm

    def op_hintmask(self, index):
        # The first hintmask/cntrmask implies a final vstem count and fixes
        # the mask width (one bit per hint, rounded up to whole bytes).
        if not self.hintMaskBytes:
            self.countHints()
            self.hintMaskBytes = (self.hintCount + 7) / 8  # int division (py2)
        # the mask bytes follow the operator inline in the charstring
        hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
        return hintMaskBytes, index

    op_cntrmask = op_hintmask

    def countHints(self):
        # Each hint is an operand pair; must run before the mask width is fixed.
        assert self.hintMaskBytes == 0
        args = self.popall()
        self.hintCount = self.hintCount + len(args) / 2  # int division (py2)
|
||||||
|
|
||||||
|
|
||||||
|
class T2OutlineExtractor(SimpleT2Decompiler):
    """Type 2 charstring interpreter that extracts the glyph outline.

    Contours are collected in self.contours as [points, flags, isClosed]
    triplets; points are absolute (the charstring's relative deltas are
    accumulated into self.currentPoint).  Also extracts the advance width
    per the Type 2 width convention (see popallWidth).

    Depends on the (ancient) Numeric package for 2-vector arithmetic.
    """

    def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX):
        SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
        self.nominalWidthX = nominalWidthX
        self.defaultWidthX = defaultWidthX

    def reset(self):
        import Numeric
        SimpleT2Decompiler.reset(self)
        self.hints = []
        self.gotWidth = 0     # becomes 1 once the width has been consumed
        self.width = 0
        self.currentPoint = Numeric.array((0, 0), Numeric.Int16)
        self.contours = []

    def getContours(self):
        return self.contours

    def newPath(self):
        # Start a new contour: [points, onCurve-flags, isClosed].
        self.contours.append([[], [], 0])

    def closePath(self):
        # Mark the current contour closed (idempotent).
        if self.contours and self.contours[-1][2] == 0:
            self.contours[-1][2] = 1

    def appendPoint(self, point, isPrimary):
        # Convert the relative 'point' to absolute and append it to the
        # current contour; 'isPrimary' flags an on-curve point.
        import Numeric
        point = self.currentPoint + Numeric.array(point, Numeric.Int16)
        self.currentPoint = point
        points, flags, isClosed = self.contours[-1]
        points.append(point)
        flags.append(isPrimary)

    def popallWidth(self, evenOdd=0):
        """popall(), but the first time through strip the optional leading
        width argument: an odd operand count (relative to the operator's
        expected parity, 'evenOdd') means a width is present."""
        args = self.popall()
        if not self.gotWidth:
            if evenOdd ^ (len(args) % 2):
                self.width = self.nominalWidthX + args[0]
                args = args[1:]
            else:
                self.width = self.defaultWidthX
            self.gotWidth = 1
        return args

    def countHints(self):
        # Same as the base class, but width-aware (a width may precede the
        # first stem hint operator).
        assert self.hintMaskBytes == 0
        args = self.popallWidth()
        self.hintCount = self.hintCount + len(args) / 2  # int division (py2)

    #
    # hint operators
    #
    def op_hstem(self, index):
        self.popallWidth()  # XXX
    def op_vstem(self, index):
        self.popallWidth()  # XXX
    def op_hstemhm(self, index):
        self.countHints()
        #XXX
    def op_vstemhm(self, index):
        self.countHints()
        #XXX
    #def op_hintmask(self, index):
    #	self.countHints()
    #def op_cntrmask(self, index):
    #	self.countHints()

    #
    # path constructors, moveto
    #
    def op_rmoveto(self, index):
        self.closePath()
        self.newPath()
        self.appendPoint(self.popallWidth(), 1)
    def op_hmoveto(self, index):
        self.closePath()
        self.newPath()
        # one coordinate on the stack: horizontal move
        self.appendPoint((self.popallWidth(1)[0], 0), 1)
    def op_vmoveto(self, index):
        self.closePath()
        self.newPath()
        # one coordinate on the stack: vertical move
        self.appendPoint((0, self.popallWidth(1)[0]), 1)
    def op_endchar(self, index):
        self.closePath()

    #
    # path constructors, lines
    #
    def op_rlineto(self, index):
        # {dxa dya}+ rlineto
        args = self.popall()
        for i in range(0, len(args), 2):
            point = args[i:i+2]
            self.appendPoint(point, 1)

    def op_hlineto(self, index):
        self.alternatingLineto(1)
    def op_vlineto(self, index):
        self.alternatingLineto(0)

    #
    # path constructors, curves
    #
    def op_rrcurveto(self, index):
        """{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
        args = self.popall()
        for i in range(0, len(args), 6):
            dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
            self.rrcurveto((dxa, dya), (dxb, dyb), (dxc, dyc))

    def op_rcurveline(self, index):
        """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""
        args = self.popall()
        # all but the trailing pair are curve segments; the pair is a line
        for i in range(0, len(args)-2, 6):
            dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6]
            self.rrcurveto((dxb, dyb), (dxc, dyc), (dxd, dyd))
        self.appendPoint(args[-2:], 1)

    def op_rlinecurve(self, index):
        """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""
        args = self.popall()
        # leading pairs are line segments; the final six args are a curve
        lineArgs = args[:-6]
        for i in range(0, len(lineArgs), 2):
            self.appendPoint(lineArgs[i:i+2], 1)
        dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]
        self.rrcurveto((dxb, dyb), (dxc, dyc), (dxd, dyd))

    def op_vvcurveto(self, index):
        "dx1? {dya dxb dyb dyc}+ vvcurveto"
        args = self.popall()
        if len(args) % 2:
            # odd count: an initial dx1 applies to the first curve only
            dx1 = args[0]
            args = args[1:]
        else:
            dx1 = 0
        for i in range(0, len(args), 4):
            dya, dxb, dyb, dyc = args[i:i+4]
            self.rrcurveto((dx1, dya), (dxb, dyb), (0, dyc))
            dx1 = 0

    def op_hhcurveto(self, index):
        """dy1? {dxa dxb dyb dxc}+ hhcurveto"""
        args = self.popall()
        if len(args) % 2:
            # odd count: an initial dy1 applies to the first curve only
            dy1 = args[0]
            args = args[1:]
        else:
            dy1 = 0
        for i in range(0, len(args), 4):
            dxa, dxb, dyb, dxc = args[i:i+4]
            self.rrcurveto((dxa, dy1), (dxb, dyb), (dxc, 0))
            dy1 = 0

    def op_vhcurveto(self, index):
        """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)
        {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto
        """
        # curves alternate: start vertical, then horizontal, ...
        args = self.popall()
        while args:
            args = self.vcurveto(args)
            if args:
                args = self.hcurveto(args)

    def op_hvcurveto(self, index):
        """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?
        {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?
        """
        # curves alternate: start horizontal, then vertical, ...
        args = self.popall()
        while args:
            args = self.hcurveto(args)
            if args:
                args = self.vcurveto(args)

    #
    # path constructors, flex
    #
    # XXX the flex operators are not implemented yet; executing one raises
    # NameError on the bare 'XXX' placeholder.
    def op_hflex(self, index):
        XXX
    def op_flex(self, index):
        XXX
    def op_hflex1(self, index):
        XXX
    def op_flex1(self, index):
        XXX

    #
    # MultipleMaster. Well...
    #
    def op_blend(self, index):
        XXX

    # misc
    # XXX the arithmetic/storage operators below (except div) are likewise
    # unimplemented placeholders.
    def op_and(self, index):
        XXX
    def op_or(self, index):
        XXX
    def op_not(self, index):
        XXX
    def op_store(self, index):
        XXX
    def op_abs(self, index):
        XXX
    def op_add(self, index):
        XXX
    def op_sub(self, index):
        XXX
    def op_div(self, index):
        # num1 num2 div: push an int result when the division is exact,
        # otherwise a float.
        num2 = self.pop()
        num1 = self.pop()
        d1 = num1/num2
        d2 = float(num1)/num2
        if d1 == d2:
            self.push(d1)
        else:
            self.push(d2)
    def op_load(self, index):
        XXX
    def op_neg(self, index):
        XXX
    def op_eq(self, index):
        XXX
    def op_drop(self, index):
        XXX
    def op_put(self, index):
        XXX
    def op_get(self, index):
        XXX
    def op_ifelse(self, index):
        XXX
    def op_random(self, index):
        XXX
    def op_mul(self, index):
        XXX
    def op_sqrt(self, index):
        XXX
    def op_dup(self, index):
        XXX
    def op_exch(self, index):
        XXX
    def op_index(self, index):
        XXX
    def op_roll(self, index):
        XXX

    #
    # miscelaneous helpers
    #
    def alternatingLineto(self, isHorizontal):
        # hlineto/vlineto: each operand is a single delta, alternating
        # between horizontal and vertical segments.
        args = self.popall()
        for arg in args:
            if isHorizontal:
                point = (arg, 0)
            else:
                point = (0, arg)
            self.appendPoint(point, 1)
            isHorizontal = not isHorizontal

    def rrcurveto(self, p1, p2, p3):
        # One cubic segment: two off-curve control points, one on-curve.
        self.appendPoint(p1, 0)
        self.appendPoint(p2, 0)
        self.appendPoint(p3, 1)

    def vcurveto(self, args):
        # One curve that starts vertical; a trailing odd argument supplies
        # the final dy of the last curve in the sequence.
        dya, dxb, dyb, dxc = args[:4]
        args = args[4:]
        if len(args) == 1:
            dyc = args[0]
            args = []
        else:
            dyc = 0
        self.rrcurveto((0, dya), (dxb, dyb), (dxc, dyc))
        return args

    def hcurveto(self, args):
        # One curve that starts horizontal; a trailing odd argument
        # supplies the final dx of the last curve in the sequence.
        dxa, dxb, dyb, dyc = args[:4]
        args = args[4:]
        if len(args) == 1:
            dxc = args[0]
            args = []
        else:
            dxc = 0
        self.rrcurveto((dxa, 0), (dxb, dyb), (dxc, dyc))
        return args
|
||||||
|
|
||||||
|
|
||||||
|
class T1OutlineExtractor(T2OutlineExtractor):
    """Type 1 charstring interpreter that extracts the glyph outline.

    Reuses the T2 machinery but: subrs are unbiased, widths come from
    hsbw/sbw rather than the T2 width convention, and OtherSubrs 0/1 are
    interpreted to reconstruct flex sections.
    """

    def __init__(self, subrs):
        self.subrs = subrs
        self.reset()

    def reset(self):
        self.flexing = 0   # 1 while inside a flex section (OtherSubr 1..0)
        self.width = 0
        self.sbx = 0       # left sidebearing x, from hsbw
        T2OutlineExtractor.reset(self)

    def popallWidth(self, evenOdd=0):
        # Type 1 has no leading width operand; plain popall().
        return self.popall()

    def exch(self):
        # Swap the top two operands.
        stack = self.operandStack
        stack[-1], stack[-2] = stack[-2], stack[-1]

    #
    # path constructors
    #
    def op_rmoveto(self, index):
        if self.flexing:
            # during flex the movetos are reference points, not a new path
            return
        self.newPath()
        self.appendPoint(self.popall(), 1)
    def op_hmoveto(self, index):
        if self.flexing:
            # We must add a parameter to the stack if we are flexing
            self.push(0)
            return
        self.newPath()
        self.appendPoint((self.popall()[0], 0), 1)
    def op_vmoveto(self, index):
        if self.flexing:
            # We must add a parameter to the stack if we are flexing
            self.push(0)
            self.exch()
            return
        self.newPath()
        self.appendPoint((0, self.popall()[0]), 1)
    def op_closepath(self, index):
        self.closePath()
    def op_setcurrentpoint(self, index):
        # Overwrite the current point directly (used after OtherSubrs).
        args = self.popall()
        x, y = args
        self.currentPoint[0] = x
        self.currentPoint[1] = y

    def op_endchar(self, index):
        self.closePath()

    def op_hsbw(self, index):
        # sbx wx hsbw: left sidebearing and advance width; the sidebearing
        # becomes the x coordinate of the implicit starting point.
        sbx, wx = self.popall()
        self.width = wx
        self.sbx = sbx
        self.currentPoint[0] = sbx
    def op_sbw(self, index):
        self.popall()  # XXX

    #
    def op_callsubr(self, index):
        # Type 1 subr numbers are not biased.
        subrIndex = self.pop()
        subr = self.subrs[subrIndex]
        self.execute(subr)
    def op_callothersubr(self, index):
        # arg1 ... argn n othersubr# callothersubr
        subrIndex = self.pop()
        nArgs = self.pop()
        #print nArgs, subrIndex, "callothersubr"
        if subrIndex == 0 and nArgs == 3:
            # OtherSubr 0 ends a flex section
            self.doFlex()
            self.flexing = 0
        elif subrIndex == 1 and nArgs == 0:
            # OtherSubr 1 starts a flex section
            self.flexing = 1
        # ignore...
    def op_pop(self, index):
        pass  # ignore...

    def doFlex(self):
        """Convert the 17 values accumulated during a flex section into
        two rrcurveto calls (the flex-height value is discarded)."""
        finaly = self.pop()
        finalx = self.pop()
        self.pop()	# flex height is unused

        p3y = self.pop()
        p3x = self.pop()
        bcp4y = self.pop()
        bcp4x = self.pop()
        bcp3y = self.pop()
        bcp3x = self.pop()
        p2y = self.pop()
        p2x = self.pop()
        bcp2y = self.pop()
        bcp2x = self.pop()
        bcp1y = self.pop()
        bcp1x = self.pop()
        rpy = self.pop()
        rpx = self.pop()

        # call rrcurveto
        self.push(bcp1x+rpx)
        self.push(bcp1y+rpy)
        self.push(bcp2x)
        self.push(bcp2y)
        self.push(p2x)
        self.push(p2y)
        self.op_rrcurveto(None)

        # call rrcurveto
        self.push(bcp3x)
        self.push(bcp3y)
        self.push(bcp4x)
        self.push(bcp4y)
        self.push(p3x)
        self.push(p3y)
        self.op_rrcurveto(None)

        # Push back final coords so subr 0 can find them
        self.push(finalx)
        self.push(finaly)

    def op_dotsection(self, index):
        self.popall()  # XXX
    def op_hstem3(self, index):
        self.popall()  # XXX
    def op_seac(self, index):
        "asb adx ady bchar achar seac"
        # standard-encoding accented character: recorded as a pseudo
        # contour with isClosed == -1 rather than rendered here
        asb, adx, ady, bchar, achar = self.popall()  # XXX
        self.contours.append([(asb, adx, ady, bchar, achar), None, -1])
    def op_vstem3(self, index):
        self.popall()  # XXX
|
||||||
|
|
||||||
|
|
||||||
|
class DictDecompiler(ByteCodeDecompilerBase):
    """Decompiler for CFF DICT data (Top DICT, Private DICT).

    Operands accumulate on self.stack; each operator pops its arguments
    (per the argument types declared in self.operators, supplied by a
    subclass) and stores them in self.dict under the operator's name.
    """

    # operandEncoding maps a lead byte to a handler-method name.
    operandEncoding = cffDictOperandEncoding
    dictDefaults = {}

    def __init__(self, strings):
        # 'strings' is the CFF string index, for resolving SID operands.
        self.stack = []
        self.strings = strings
        self.dict = {}

    def getDict(self):
        # All operands must have been consumed by operators.
        assert len(self.stack) == 0, "non-empty stack"
        return self.dict

    def decompile(self, data):
        """Decompile the DICT binary 'data' into self.dict."""
        index = 0
        lenData = len(data)
        push = self.stack.append
        while index < lenData:
            b0 = ord(data[index])
            index = index + 1
            # dispatch on the lead byte: operand readers or do_operator
            code = self.operandEncoding[b0]
            handler = getattr(self, code)
            value, index = handler(b0, data, index)
            if value is not None:
                push(value)

    def pop(self):
        # Pop and return the top operand.
        value = self.stack[-1]
        del self.stack[-1]
        return value

    def popall(self):
        # Return all operands and clear the stack.
        all = self.stack[:]
        del self.stack[:]
        return all

    def do_operator(self, b0, data, index):
        """Decode an operator (b0 == 12 introduces a two-byte operator)
        and let handle_operator() consume its operands."""
        if b0 == 12:
            op = (b0, ord(data[index]))
            index = index+1
        else:
            op = b0
        operator, argType = self.operators[op]
        self.handle_operator(operator, argType)
        return None, index

    def handle_operator(self, operator, argType):
        # A tuple argType means multiple arguments; they are popped in
        # reverse, so prepending restores the original order.
        if type(argType) == type(()):
            value = ()
            for arg in argType:
                arghandler = getattr(self, "arg_" + arg)
                value = (arghandler(operator),) + value
        else:
            arghandler = getattr(self, "arg_" + argType)
            value = arghandler(operator)
        self.dict[operator] = value

    # argument-type handlers, dispatched by name from handle_operator()
    def arg_number(self, name):
        return self.pop()
    def arg_SID(self, name):
        # a SID operand indexes the CFF string index
        return self.strings[self.pop()]
    def arg_array(self, name):
        return self.popall()
|
||||||
|
|
||||||
|
|
||||||
|
# CFF Top DICT operators: (opcode, name, argument type).  Argument types
# select the arg_* handlers in DictDecompiler; a tuple means multiple
# arguments.  (12, n) opcodes are two-byte escaped operators.
topDictOperators = [
    # opcode     name              argument type
    (0, 'version', 'SID'),
    (1, 'Notice', 'SID'),
    (2, 'FullName', 'SID'),
    (3, 'FamilyName', 'SID'),
    (4, 'Weight', 'SID'),
    (5, 'FontBBox', 'array'),
    (13, 'UniqueID', 'number'),
    (14, 'XUID', 'array'),
    (15, 'charset', 'number'),
    (16, 'Encoding', 'number'),
    (17, 'CharStrings', 'number'),
    (18, 'Private', ('number', 'number')),
    ((12, 0), 'Copyright', 'SID'),
    ((12, 1), 'isFixedPitch', 'number'),
    ((12, 2), 'ItalicAngle', 'number'),
    ((12, 3), 'UnderlinePosition', 'number'),
    ((12, 4), 'UnderlineThickness', 'number'),
    ((12, 5), 'PaintType', 'number'),
    ((12, 6), 'CharstringType', 'number'),
    ((12, 7), 'FontMatrix', 'array'),
    ((12, 8), 'StrokeWidth', 'number'),
    ((12, 20), 'SyntheticBase', 'number'),
    ((12, 21), 'PostScript', 'SID'),
    ((12, 22), 'BaseFontName', 'SID'),
    # CID additions
    ((12, 30), 'ROS', ('SID', 'SID', 'number')),
    ((12, 31), 'CIDFontVersion', 'number'),
    ((12, 32), 'CIDFontRevision', 'number'),
    ((12, 33), 'CIDFontType', 'number'),
    ((12, 34), 'CIDCount', 'number'),
    ((12, 35), 'UIDBase', 'number'),
    ((12, 36), 'FDArray', 'number'),
    ((12, 37), 'FDSelect', 'number'),
    ((12, 38), 'FontName', 'SID'),
    # MM, Chameleon. Pft.
]
|
||||||
|
|
||||||
|
# Default values for Top DICT entries that may be omitted from the font.
topDictDefaults = {
    'isFixedPitch': 0,
    'ItalicAngle': 0,
    'UnderlineThickness': 50,
    'PaintType': 0,
    'CharstringType': 2,
    'FontMatrix': [0.001, 0, 0, 0.001, 0, 0],
    'FontBBox': [0, 0, 0, 0],
    'StrokeWidth': 0,
    'charset': 0,
    'Encoding': 0,
    # CID defaults
    'CIDFontVersion': 0,
    'CIDFontRevision': 0,
    'CIDFontType': 0,
    'CIDCount': 8720,
}
|
||||||
|
|
||||||
|
class TopDictDecompiler(DictDecompiler):
    """DictDecompiler specialized for the CFF Top DICT."""

    operators = _buildOperatorDict(topDictOperators)
    dictDefaults = topDictDefaults
|
||||||
|
|
||||||
|
|
||||||
|
# CFF Private DICT operators: (opcode, name, argument type); same
# conventions as topDictOperators above.
privateDictOperators = [
    # opcode     name              argument type
    (6, 'BlueValues', 'array'),
    (7, 'OtherBlues', 'array'),
    (8, 'FamilyBlues', 'array'),
    (9, 'FamilyOtherBlues', 'array'),
    (10, 'StdHW', 'number'),
    (11, 'StdVW', 'number'),
    (19, 'Subrs', 'number'),
    (20, 'defaultWidthX', 'number'),
    (21, 'nominalWidthX', 'number'),
    ((12, 9), 'BlueScale', 'number'),
    ((12, 10), 'BlueShift', 'number'),
    ((12, 11), 'BlueFuzz', 'number'),
    ((12, 12), 'StemSnapH', 'array'),
    ((12, 13), 'StemSnapV', 'array'),
    ((12, 14), 'ForceBold', 'number'),
    ((12, 15), 'ForceBoldThreshold', 'number'),
    ((12, 16), 'lenIV', 'number'),
    ((12, 17), 'LanguageGroup', 'number'),
    ((12, 18), 'ExpansionFactor', 'number'),
    ((12, 19), 'initialRandomSeed', 'number'),
]
|
||||||
|
|
||||||
|
# Default values for Private DICT entries that may be omitted.
privateDictDefaults = {
    'defaultWidthX': 0,
    'nominalWidthX': 0,
    'BlueScale': 0.039625,
    'BlueShift': 7,
    'BlueFuzz': 1,
    'ForceBold': 0,
    'ForceBoldThreshold': 0,
    'lenIV': -1,
    'LanguageGroup': 0,
    'ExpansionFactor': 0.06,
    'initialRandomSeed': 0,
}
|
||||||
|
|
||||||
|
class PrivateDictDecompiler(DictDecompiler):
    """DictDecompiler specialized for the CFF Private DICT."""

    operators = _buildOperatorDict(privateDictOperators)
    dictDefaults = privateDictDefaults
|
||||||
|
|
||||||
|
|
||||||
|
def calcSubrBias(subrs):
    """Return the subroutine-number bias for a subr index of this size.

    Type 2 charstrings store subr numbers relative to a bias that depends
    on how many subroutines the index contains.
    """
    count = len(subrs)
    if count < 1240:
        return 107
    if count < 33900:
        return 1131
    return 32768
|
||||||
|
|
346
Lib/fontTools/psLib.py
Normal file
346
Lib/fontTools/psLib.py
Normal file
@ -0,0 +1,346 @@
|
|||||||
|
import StringIO
|
||||||
|
import regex
|
||||||
|
import string
|
||||||
|
import eexec
|
||||||
|
import types
|
||||||
|
from psOperators import *
|
||||||
|
|
||||||
|
|
||||||
|
# Delimiter characters handled specially by the tokenizer.
ps_special = '()<>[]{}%'	# / is one too, but we take care of that one differently

whitespace = string.whitespace
# NOTE: these use the old "regex" module, whose match() returns the
# length of the match (or a negative value on failure), not a match object.
skipwhiteRE = regex.compile("[%s]*" % whitespace)

# Everything up to the next delimiter, '%' or whitespace character.
endofthingPat = "[^][(){}<>/%s%s]*" % ('%', whitespace)
endofthingRE = regex.compile(endofthingPat)

# A PostScript comment: '%' up to (but not including) the line end.
commentRE = regex.compile("%[^\n\r]*")

# XXX This not entirely correct:
# Parenthesized PostScript string; the embedded whitespace is only for
# readability and is stripped out again below before compiling.
stringPat = """
(
\(
\(
[^()]* \\\\ [()]
\)
\|
\(
[^()]* ( [^()]* )
\)
\)*
[^()]*
)
"""
stringPat = string.join(string.split(stringPat), '')
stringRE = regex.compile(stringPat)

# Hex string: '<' ... '>' containing hex digits and whitespace.
hexstringRE = regex.compile("<[%s0-9A-Fa-f]*>" % whitespace)

# String exceptions (the old Python 1.x exception idiom).
ps_tokenerror = 'ps_tokenerror'
ps_error = 'ps_error'
|
||||||
|
|
||||||
|
class PSTokenizer(StringIO.StringIO):
    """Tokenizer for PostScript source, built on StringIO.

    Reads directly from the StringIO internals (self.buf, self.pos,
    self.len, self.buflist) rather than through the file API, for speed.
    """

    def getnexttoken(self,
            # localize some stuff, for performance
            len=len,
            ps_special=ps_special,
            stringmatch=stringRE.match,
            hexstringmatch=hexstringRE.match,
            commentmatch=commentRE.match,
            endmatch=endofthingRE.match,
            whitematch=skipwhiteRE.match):
        """Return (tokentype, token) for the next token.

        'tokentype' is the name of a do_XXX handler method on the
        interpreter ('' for ordinary executable tokens); at end of input
        both values are None.  The keyword arguments only pre-bind
        globals for speed; callers pass no arguments.
        """
        # old 'regex' semantics: match() returns the matched length
        self.pos = self.pos + whitematch(self.buf, self.pos)
        if self.pos >= self.len:
            return None, None
        pos = self.pos
        buf = self.buf
        char = buf[pos]
        if char in ps_special:
            if char in '{}[]':
                tokentype = 'do_special'
                token = char
            elif char == '%':
                tokentype = 'do_comment'
                commentlen = commentmatch(buf, pos)
                token = buf[pos:pos+commentlen]
            elif char == '(':
                tokentype = 'do_string'
                strlen = stringmatch(buf, pos)
                if strlen < 0:
                    raise ps_tokenerror, 'bad string at character %d' % pos
                token = buf[pos:pos+strlen]
            elif char == '<':
                tokentype = 'do_hexstring'
                strlen = hexstringmatch(buf, pos)
                if strlen < 0:
                    raise ps_tokenerror, 'bad hexstring at character %d' % pos
                token = buf[pos:pos+strlen]
            else:
                raise ps_tokenerror, 'bad token at character %d' % pos
        else:
            if char == '/':
                # literal name: the '/' itself is part of the token
                tokentype = 'do_literal'
                endofthing = endmatch(buf, pos + 1) + 1
            else:
                # ordinary executable token: handled by do_token()
                tokentype = ''
                endofthing = endmatch(buf, pos)
            if endofthing <= 0:
                raise ps_tokenerror, 'bad token at character %d' % pos
            token = buf[pos:pos + endofthing]
        self.pos = pos + len(token)
        return tokentype, token

    def skipwhite(self, whitematch=skipwhiteRE.match):
        # Advance past any whitespace at the current position.
        self.pos = self.pos + whitematch(self.buf, self.pos)

    def starteexec(self):
        """Switch to reading the eexec-encrypted portion of the buffer:
        decrypt the remainder in place and skip the 4 random lead bytes."""
        self.pos = self.pos + 1
        #self.skipwhite()
        self.dirtybuf = self.buf[self.pos:]
        self.buf, R = eexec.Decrypt(self.dirtybuf, 55665)
        self.len = len(self.buf)
        self.pos = 4

    def stopeexec(self):
        # Restore the original (encrypted) buffer saved by starteexec().
        if not hasattr(self, 'dirtybuf'):
            return
        self.buf = self.dirtybuf
        del self.dirtybuf

    def flush(self):
        # Fold any pending written data into self.buf (StringIO internals).
        if self.buflist:
            self.buf = self.buf + string.join(self.buflist, '')
            self.buflist = []
|
||||||
|
|
||||||
|
|
||||||
|
class PSInterpreter(PSOperators):
|
||||||
|
|
||||||
|
def __init__(self):
    """Set up the dict stack (systemdict, userdict), the operand stack
    and the procedure-nesting bookkeeping, then populate systemdict."""
    systemdict = {}
    userdict = {}
    self.dictstack = [systemdict, userdict]
    self.stack = []
    self.proclevel = 0          # current { } nesting depth
    self.procmark = ps_procmark()
    self.fillsystemdict()
|
||||||
|
|
||||||
|
def fillsystemdict(self):
    """Populate systemdict with built-in objects and every ps_* method
    of this class (via suckoperators)."""
    systemdict = self.dictstack[0]
    # '[' and 'mark' are the same mark object; ']' builds an array from it
    systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
    systemdict[']'] = ps_operator(']', self.do_makearray)
    systemdict['true'] = ps_boolean(1)
    systemdict['false'] = ps_boolean(0)
    systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
    systemdict['FontDirectory'] = ps_dict({})
    self.suckoperators(systemdict, self.__class__)
|
||||||
|
|
||||||
|
def suckoperators(self, systemdict, klass):
    """Register each callable ps_XXX attribute of 'klass' (and,
    recursively, of its base classes) as PostScript operator XXX."""
    for name in dir(klass):
        attr = getattr(self, name)
        if callable(attr) and name[:3] == 'ps_':
            name = name[3:]    # strip the 'ps_' prefix
            systemdict[name] = ps_operator(name, attr)
    for baseclass in klass.__bases__:
        self.suckoperators(systemdict, baseclass)
|
||||||
|
|
||||||
|
def interpret(self, data, getattr = getattr):
|
||||||
|
tokenizer = self.tokenizer = PSTokenizer(data)
|
||||||
|
getnexttoken = tokenizer.getnexttoken
|
||||||
|
do_token = self.do_token
|
||||||
|
handle_object = self.handle_object
|
||||||
|
try:
|
||||||
|
while 1:
|
||||||
|
tokentype, token = getnexttoken()
|
||||||
|
#print token
|
||||||
|
if not token:
|
||||||
|
break
|
||||||
|
if tokentype:
|
||||||
|
handler = getattr(self, tokentype)
|
||||||
|
object = handler(token)
|
||||||
|
else:
|
||||||
|
object = do_token(token)
|
||||||
|
if object is not None:
|
||||||
|
handle_object(object)
|
||||||
|
tokenizer.close()
|
||||||
|
self.tokenizer = None
|
||||||
|
finally:
|
||||||
|
if self.tokenizer is not None:
|
||||||
|
print 'ps error:\n- - - - - - -'
|
||||||
|
print self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]
|
||||||
|
print '>>>'
|
||||||
|
print self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]
|
||||||
|
print '- - - - - - -'
|
||||||
|
|
||||||
|
def handle_object(self, object):
|
||||||
|
if not (self.proclevel or object.literal or object.type == 'proceduretype'):
|
||||||
|
if object.type <> 'operatortype':
|
||||||
|
object = self.resolve_name(object.value)
|
||||||
|
if object.literal:
|
||||||
|
self.push(object)
|
||||||
|
else:
|
||||||
|
if object.type == 'proceduretype':
|
||||||
|
self.call_procedure(object)
|
||||||
|
else:
|
||||||
|
object.function()
|
||||||
|
else:
|
||||||
|
self.push(object)
|
||||||
|
|
||||||
|
def call_procedure(self, proc):
|
||||||
|
handle_object = self.handle_object
|
||||||
|
for item in proc.value:
|
||||||
|
handle_object(item)
|
||||||
|
|
||||||
|
def resolve_name(self, name):
|
||||||
|
dictstack = self.dictstack
|
||||||
|
for i in range(len(dictstack)-1, -1, -1):
|
||||||
|
if dictstack[i].has_key(name):
|
||||||
|
return dictstack[i][name]
|
||||||
|
raise ps_error, 'name error: ' + str(name)
|
||||||
|
|
||||||
|
def do_token(self, token,
|
||||||
|
atoi = string.atoi,
|
||||||
|
atof = string.atof,
|
||||||
|
ps_name = ps_name,
|
||||||
|
ps_integer = ps_integer,
|
||||||
|
ps_real = ps_real):
|
||||||
|
try:
|
||||||
|
num = atoi(token)
|
||||||
|
except (ValueError, OverflowError):
|
||||||
|
try:
|
||||||
|
num = atof(token)
|
||||||
|
except (ValueError, OverflowError):
|
||||||
|
if '#' in token:
|
||||||
|
hashpos = string.find(token, '#')
|
||||||
|
try:
|
||||||
|
base = string.atoi(token[:hashpos])
|
||||||
|
num = string.atoi(token[hashpos+1:], base)
|
||||||
|
except (ValueError, OverflowError):
|
||||||
|
return ps_name(token)
|
||||||
|
else:
|
||||||
|
return ps_integer(num)
|
||||||
|
else:
|
||||||
|
return ps_name(token)
|
||||||
|
else:
|
||||||
|
return ps_real(num)
|
||||||
|
else:
|
||||||
|
return ps_integer(num)
|
||||||
|
|
||||||
|
def do_comment(self, token):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def do_literal(self, token):
|
||||||
|
return ps_literal(token[1:])
|
||||||
|
|
||||||
|
def do_string(self, token):
|
||||||
|
return ps_string(token[1:-1])
|
||||||
|
|
||||||
|
def do_hexstring(self, token):
|
||||||
|
hexStr = string.join(string.split(token[1:-1]), '')
|
||||||
|
if len(hexStr) % 2:
|
||||||
|
hexStr = hexStr + '0'
|
||||||
|
cleanstr = []
|
||||||
|
for i in range(0, len(hexStr), 2):
|
||||||
|
cleanstr.append(chr(string.atoi(hexStr[i:i+2], 16)))
|
||||||
|
cleanstr = string.join(cleanstr, '')
|
||||||
|
return ps_string(cleanstr)
|
||||||
|
|
||||||
|
def do_special(self, token):
|
||||||
|
if token == '{':
|
||||||
|
self.proclevel = self.proclevel + 1
|
||||||
|
return self.procmark
|
||||||
|
elif token == '}':
|
||||||
|
proc = []
|
||||||
|
while 1:
|
||||||
|
topobject = self.pop()
|
||||||
|
if topobject == self.procmark:
|
||||||
|
break
|
||||||
|
proc.append(topobject)
|
||||||
|
self.proclevel = self.proclevel - 1
|
||||||
|
proc.reverse()
|
||||||
|
return ps_procedure(proc)
|
||||||
|
elif token == '[':
|
||||||
|
return self.mark
|
||||||
|
elif token == ']':
|
||||||
|
return ps_name(']')
|
||||||
|
else:
|
||||||
|
raise ps_tokenerror, 'huh?'
|
||||||
|
|
||||||
|
def push(self, object):
|
||||||
|
self.stack.append(object)
|
||||||
|
|
||||||
|
def pop(self, *types):
|
||||||
|
stack = self.stack
|
||||||
|
if not stack:
|
||||||
|
raise ps_error, 'stack underflow'
|
||||||
|
object = stack[-1]
|
||||||
|
if types:
|
||||||
|
if object.type not in types:
|
||||||
|
raise ps_error, 'typecheck, expected %s, found %s' % (`types`, object.type)
|
||||||
|
del stack[-1]
|
||||||
|
return object
|
||||||
|
|
||||||
|
def do_makearray(self):
|
||||||
|
array = []
|
||||||
|
while 1:
|
||||||
|
topobject = self.pop()
|
||||||
|
if topobject == self.mark:
|
||||||
|
break
|
||||||
|
array.append(topobject)
|
||||||
|
array.reverse()
|
||||||
|
self.push(ps_array(array))
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
"""Remove circular references."""
|
||||||
|
del self.stack
|
||||||
|
del self.dictstack
|
||||||
|
|
||||||
|
|
||||||
|
def unpack_item(item):
|
||||||
|
tp = type(item.value)
|
||||||
|
if tp == types.DictionaryType:
|
||||||
|
newitem = {}
|
||||||
|
for key, value in item.value.items():
|
||||||
|
newitem[key] = unpack_item(value)
|
||||||
|
elif tp == types.ListType:
|
||||||
|
newitem = [None] * len(item.value)
|
||||||
|
for i in range(len(item.value)):
|
||||||
|
newitem[i] = unpack_item(item.value[i])
|
||||||
|
if item.type == 'proceduretype':
|
||||||
|
newitem = tuple(newitem)
|
||||||
|
else:
|
||||||
|
newitem = item.value
|
||||||
|
return newitem
|
||||||
|
|
||||||
|
def suckfont(data):
|
||||||
|
import re
|
||||||
|
m = re.search(r"/FontName\s+/([^ \t\n\r]+)\s+def", data)
|
||||||
|
if m:
|
||||||
|
fontName = m.group(1)
|
||||||
|
else:
|
||||||
|
fontName = None
|
||||||
|
interpreter = PSInterpreter()
|
||||||
|
interpreter.interpret("/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
|
||||||
|
interpreter.interpret(data)
|
||||||
|
fontdir = interpreter.dictstack[0]['FontDirectory'].value
|
||||||
|
if fontdir.has_key(fontName):
|
||||||
|
rawfont = fontdir[fontName]
|
||||||
|
else:
|
||||||
|
# fall back, in case fontName wasn't found
|
||||||
|
fontNames = fontdir.keys()
|
||||||
|
if len(fontNames) > 1:
|
||||||
|
fontNames.remove("Helvetica")
|
||||||
|
fontNames.sort()
|
||||||
|
rawfont = fontdir[fontNames[0]]
|
||||||
|
interpreter.close()
|
||||||
|
return unpack_item(rawfont)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import macfs
|
||||||
|
fss, ok = macfs.StandardGetFile("LWFN")
|
||||||
|
if ok:
|
||||||
|
import t1Lib
|
||||||
|
data, kind = t1Lib.read(fss.as_pathname())
|
||||||
|
font = suckfont(data)
|
580
Lib/fontTools/psOperators.py
Normal file
580
Lib/fontTools/psOperators.py
Normal file
@ -0,0 +1,580 @@
|
|||||||
|
import string
|
||||||
|
|
||||||
|
|
||||||
|
_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
|
||||||
|
|
||||||
|
class ps_object:

    """Base class for all wrapped PostScript objects.

    Subclasses are named ps_<kind>; the type string is derived from the
    class name as '<kind>type' (e.g. ps_integer -> 'integertype').
    """

    # Class-level defaults shared by all subclasses.
    literal = 1
    access = 0
    value = None

    def __init__(self, value):
        self.value = value
        kind = self.__class__.__name__[3:]  # strip the 'ps_' prefix
        self.type = kind + "type"

    def __repr__(self):
        kind = self.__class__.__name__[3:]
        return "<%s %s>" % (kind, repr(self.value))
|
||||||
|
|
||||||
|
|
||||||
|
class ps_operator(ps_object):

    """A named, executable operator bound to a Python callable."""

    literal = 0

    def __init__(self, name, function):
        # Unlike the base class, an operator wraps a (name, callable)
        # pair rather than a plain value.
        self.function = function
        self.name = name
        self.type = type(self).__name__[3:] + "type"

    def __repr__(self):
        return "<operator %s>" % self.name
|
||||||
|
|
||||||
|
class ps_procedure(ps_object):

    """An executable { ... } procedure; value is a list of objects."""

    literal = 0

    def __repr__(self):
        return "<procedure>"

    def __str__(self):
        # Render as '{elem elem ...}' with single spaces between elements.
        pieces = []
        for element in self.value:
            pieces.append(str(element))
        return '{' + string.join(pieces, ' ') + '}'
|
||||||
|
|
||||||
|
class ps_name(ps_object):

    """An executable name; rendered with a leading '/' only when the
    instance has been marked literal."""

    literal = 0

    def __str__(self):
        if not self.literal:
            return self.value
        return '/' + self.value
|
||||||
|
|
||||||
|
class ps_literal(ps_object):

    """A literal name; always rendered with a leading slash."""

    def __str__(self):
        return "/%s" % self.value
|
||||||
|
|
||||||
|
class ps_array(ps_object):

    """A PostScript array; value is a list of wrapped objects."""

    def __str__(self):
        # Each element is followed by its access string ('readonly',
        # 'executeonly', 'noaccess') when it has one; elements are
        # space-separated inside square brackets.
        pieces = []
        for element in self.value:
            access = _accessstrings[element.access]
            if access:
                pieces.append(str(element) + ' ' + access)
            else:
                pieces.append(str(element))
        return '[' + string.join(pieces, ' ') + ']'

    def __repr__(self):
        return "<array>"
|
||||||
|
|
||||||
|
_type1_pre_eexec_order = [
|
||||||
|
"FontInfo",
|
||||||
|
"FontName",
|
||||||
|
"Encoding",
|
||||||
|
"PaintType",
|
||||||
|
"FontType",
|
||||||
|
"FontMatrix",
|
||||||
|
"FontBBox",
|
||||||
|
"UniqueID",
|
||||||
|
"Metrics",
|
||||||
|
"StrokeWidth"
|
||||||
|
]
|
||||||
|
|
||||||
|
_type1_fontinfo_order = [
|
||||||
|
"version",
|
||||||
|
"Notice",
|
||||||
|
"FullName",
|
||||||
|
"FamilyName",
|
||||||
|
"Weight",
|
||||||
|
"ItalicAngle",
|
||||||
|
"isFixedPitch",
|
||||||
|
"UnderlinePosition",
|
||||||
|
"UnderlineThickness"
|
||||||
|
]
|
||||||
|
|
||||||
|
_type1_post_eexec_order = [
|
||||||
|
"Private",
|
||||||
|
"CharStrings",
|
||||||
|
"FID"
|
||||||
|
]
|
||||||
|
|
||||||
|
def _type1_item_repr(key, value):
|
||||||
|
psstring = ""
|
||||||
|
access = _accessstrings[value.access]
|
||||||
|
if access:
|
||||||
|
access = access + ' '
|
||||||
|
if key == 'CharStrings':
|
||||||
|
psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
|
||||||
|
elif key == 'Encoding':
|
||||||
|
psstring = psstring + _type1_Encoding_repr(value, access)
|
||||||
|
else:
|
||||||
|
psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
|
||||||
|
return psstring
|
||||||
|
|
||||||
|
def _type1_Encoding_repr(encoding, access):
|
||||||
|
encoding = encoding.value
|
||||||
|
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
|
||||||
|
for i in range(256):
|
||||||
|
name = encoding[i].value
|
||||||
|
if name <> '.notdef':
|
||||||
|
psstring = psstring + "dup %d /%s put\n" % (i, name)
|
||||||
|
return psstring + access + "def\n"
|
||||||
|
|
||||||
|
def _type1_CharString_repr(charstrings):
|
||||||
|
items = charstrings.items()
|
||||||
|
items.sort()
|
||||||
|
return 'xxx'
|
||||||
|
|
||||||
|
class ps_font(ps_object):
    # A complete Type 1 font dictionary; __str__ serializes it back to
    # PostScript source in the canonical Type 1 layout.
    def __str__(self):
        psstring = "%d dict dup begin\n" % len(self.value)
        # First the well-known clear-text entries, in spec order.
        for key in _type1_pre_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        # Then any remaining entries that are neither pre- nor
        # post-eexec keys, in sorted order.
        items = self.value.items()
        items.sort()
        for key, value in items:
            if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
                psstring = psstring + _type1_item_repr(key, value)
        # Transition into the (to-be-encrypted) eexec section.
        psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
        for key in _type1_post_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        # Standard Type 1 trailer: 512 zeros (8 lines of 64) + cleartomark.
        return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
            8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
    def __repr__(self):
        return '<font>'
|
||||||
|
|
||||||
|
class ps_file(ps_object):
    # Wraps the interpreter's tokenizer so PostScript file operators
    # (eexec, closefile, readstring) can operate on it.
    pass
|
||||||
|
|
||||||
|
class ps_dict(ps_object):

    """A PostScript dictionary; value is a plain Python dict mapping
    names to wrapped objects."""

    def __str__(self):
        """Serialize as '<n> dict dup begin ... end ' with entries in
        sorted key order, each with its access string when set."""
        psstring = "%d dict dup begin\n" % len(self.value)
        # Fixed: removed an unused local ('dictrepr') that recomputed the
        # header, and replaced the Python-2-only items()/sort() pair with
        # sorted(), which is equivalent on Python 2 and works on Python 3.
        for key, value in sorted(self.value.items()):
            access = _accessstrings[value.access]
            if access:
                access = access + ' '
            psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
        return psstring + 'end '

    def __repr__(self):
        return "<dict>"
|
||||||
|
|
||||||
|
class ps_mark(ps_object):

    """The array mark pushed by '[' / 'mark'."""

    def __init__(self):
        # No payload: the mark is identified by object identity.
        self.value = 'mark'
        self.type = type(self).__name__[3:] + "type"
|
||||||
|
|
||||||
|
class ps_procmark(ps_object):

    """The procedure mark pushed by '{' while collecting a procedure."""

    def __init__(self):
        # No payload: the procmark is identified by object identity.
        self.value = 'procmark'
        self.type = type(self).__name__[3:] + "type"
|
||||||
|
|
||||||
|
class ps_null(ps_object):

    """The PostScript null object; carries no value at all."""

    def __init__(self):
        # Deliberately skips the base __init__: null has no value
        # (the class-level default None is used).
        self.type = type(self).__name__[3:] + "type"
|
||||||
|
|
||||||
|
class ps_boolean(ps_object):

    """A PostScript boolean; truthiness of value selects the rendering."""

    def __str__(self):
        if not self.value:
            return 'false'
        return 'true'
|
||||||
|
|
||||||
|
class ps_string(ps_object):

    """A PostScript string; rendered in (paren) notation."""

    def __str__(self):
        # Fixed: the original used Python-2-only backtick repr syntax;
        # repr() is identical in Python 2 and valid in Python 3.
        # [1:-1] strips repr's surrounding quotes.
        return "(%s)" % repr(self.value)[1:-1]
|
||||||
|
|
||||||
|
class ps_integer(ps_object):

    """A PostScript integer."""

    def __str__(self):
        # Fixed: backtick repr syntax is Python-2-only; repr() is
        # identical there and valid in Python 3.
        return repr(self.value)
|
||||||
|
|
||||||
|
class ps_real(ps_object):

    """A PostScript real (floating point) number."""

    def __str__(self):
        # Fixed: backtick repr syntax is Python-2-only; repr() is
        # identical there and valid in Python 3.
        return repr(self.value)
|
||||||
|
|
||||||
|
|
||||||
|
class PSOperators:

    """Mixin providing the PostScript operator implementations.

    Every method named ps_<op> is registered as operator <op> by
    PSInterpreter.suckoperators().  They rely on the interpreter's
    push()/pop() operand stack and dictstack.
    """

    # --- dictionary / definition operators ---

    def ps_def(self):
        # key value def: bind value to key in the current dictionary.
        object = self.pop()
        name = self.pop()
        self.dictstack[-1][name.value] = object

    def ps_bind(self):
        # Replace operator names inside a procedure with the operators
        # themselves (early binding).
        proc = self.pop('proceduretype')
        self.proc_bind(proc)
        self.push(proc)

    def proc_bind(self, proc):
        # Helper for ps_bind: recurse into nested procedures; only
        # executable names that resolve to operators are replaced.
        for i in range(len(proc.value)):
            item = proc.value[i]
            if item.type == 'proceduretype':
                self.proc_bind(item)
            else:
                if not item.literal:
                    try:
                        object = self.resolve_name(item.value)
                    except:
                        # Unresolvable names are left as-is (they may be
                        # defined later).
                        pass
                    else:
                        if object.type == 'operatortype':
                            proc.value[i] = object

    # --- stack manipulation ---

    def ps_exch(self):
        # Swap the top two operands.
        if len(self.stack) < 2:
            raise RuntimeError, 'stack underflow'
        obj1 = self.pop()
        obj2 = self.pop()
        self.push(obj1)
        self.push(obj2)

    def ps_dup(self):
        # Duplicate (by reference) the top operand.
        if not self.stack:
            raise RuntimeError, 'stack underflow'
        self.push(self.stack[-1])

    def ps_exec(self):
        # Execute the top operand.
        object = self.pop()
        if object.type == 'proceduretype':
            self.call_procedure(object)
        else:
            self.handle_object(object)

    def ps_count(self):
        self.push(ps_integer(len(self.stack)))

    # --- comparison / boolean ---

    def ps_eq(self):
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value == any2.value))

    def ps_ne(self):
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value <> any2.value))

    def ps_cvx(self):
        # Make the top object executable.
        obj = self.pop()
        obj.literal = 0
        self.push(obj)

    def ps_matrix(self):
        # Push the identity transformation matrix.
        matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
        self.push(ps_array(matrix))

    def ps_string(self):
        # n string: create a string of n null bytes.
        num = self.pop('integertype').value
        self.push(ps_string('\0' * num))

    def ps_type(self):
        obj = self.pop()
        self.push(ps_string(obj.type))

    def ps_store(self):
        # key value store: replace an existing binding wherever it is
        # found on the dictstack, else define in the current dict.
        value = self.pop()
        key = self.pop()
        name = key.value
        for i in range(len(self.dictstack)-1, -1, -1):
            if self.dictstack[i].has_key(name):
                self.dictstack[i][name] = value
                break
        # NOTE(review): this final assignment also runs after a
        # successful store above (there is no for-else), so the name is
        # additionally (re)defined in the current dict -- looks
        # unintentional.  TODO confirm.
        self.dictstack[-1][name] = value

    def ps_where(self):
        name = self.pop()
        # XXX always reports "not found".
        self.push(ps_boolean(0))

    def ps_systemdict(self):
        self.push(ps_dict(self.dictstack[0]))

    def ps_userdict(self):
        self.push(ps_dict(self.dictstack[1]))

    def ps_currentdict(self):
        self.push(ps_dict(self.dictstack[-1]))

    def ps_currentfile(self):
        # Push the tokenizer wrapped as a file object.
        self.push(ps_file(self.tokenizer))

    # --- file / eexec operators ---

    def ps_eexec(self):
        # Switch the tokenizer into eexec-decryption mode.
        file = self.pop('filetype').value
        file.starteexec()

    def ps_closefile(self):
        file = self.pop('filetype').value
        file.skipwhite()
        file.stopeexec()

    def ps_cleartomark(self):
        # Pop everything down to and including the mark.
        obj = self.pop()
        while obj <> self.mark:
            obj = self.pop()

    def ps_readstring(self,
            ps_boolean = ps_boolean,
            len = len):
        # file string readstring: fill the string with bytes read from
        # the file; push the (mutated) string and a success flag.
        string = self.pop('stringtype')
        oldstr = string.value
        file = self.pop('filetype')
        #pad = file.value.read(1)
        # for StringIO, this is faster: skip the single pad byte by
        # bumping the position directly instead of reading it.
        file.value.pos = file.value.pos + 1
        newstr = file.value.read(len(oldstr))
        string.value = newstr
        self.push(string)
        self.push(ps_boolean(len(oldstr) == len(newstr)))

    def ps_known(self):
        key = self.pop()
        dict = self.pop('dicttype', 'fonttype')
        self.push(ps_boolean(dict.value.has_key(key.value)))

    # --- control flow ---

    def ps_if(self):
        proc = self.pop('proceduretype')
        bool = self.pop('booleantype')
        if bool.value:
            self.call_procedure(proc)

    def ps_ifelse(self):
        proc2 = self.pop('proceduretype')
        proc1 = self.pop('proceduretype')
        bool = self.pop('booleantype')
        if bool.value:
            self.call_procedure(proc1)
        else:
            self.call_procedure(proc2)

    # --- access attributes (levels only ever increase) ---

    def ps_readonly(self):
        obj = self.pop()
        if obj.access < 1:
            obj.access = 1
        self.push(obj)

    def ps_executeonly(self):
        obj = self.pop()
        if obj.access < 2:
            obj.access = 2
        self.push(obj)

    def ps_noaccess(self):
        obj = self.pop()
        if obj.access < 3:
            obj.access = 3
        self.push(obj)

    def ps_not(self):
        # Logical not for booleans, bitwise complement for integers.
        obj = self.pop('booleantype', 'integertype')
        if obj.type == 'booleantype':
            self.push(ps_boolean(not obj.value))
        else:
            self.push(ps_integer(~obj.value))

    def ps_print(self):
        str = self.pop('stringtype')
        print 'PS output --->', str.value

    def ps_anchorsearch(self):
        # string seek anchorsearch: split off 'seek' if it is a prefix.
        seek = self.pop('stringtype')
        string = self.pop('stringtype')
        seeklen = len(seek.value)
        if string.value[:seeklen] == seek.value:
            self.push(ps_string(string.value[seeklen:]))
            self.push(seek)
            self.push(ps_boolean(1))
        else:
            self.push(string)
            self.push(ps_boolean(0))

    # --- composite object operators ---

    def ps_array(self):
        num = self.pop('integertype')
        array = ps_array([None] * num.value)
        self.push(array)

    def ps_astore(self):
        # Fill an array from the stack, last element popped first.
        array = self.pop('arraytype')
        for i in range(len(array.value)-1, -1, -1):
            array.value[i] = self.pop()
        self.push(array)

    def ps_load(self):
        name = self.pop()
        object = self.resolve_name(name.value)
        self.push(object)

    def ps_put(self):
        # container index value put (value index for dicts); strings are
        # immutable here, so they are rebuilt around the new byte.
        obj1 = self.pop()
        obj2 = self.pop()
        obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
        tp = obj3.type
        if tp == 'arraytype' or tp == 'proceduretype':
            obj3.value[obj2.value] = obj1
        elif tp == 'dicttype':
            obj3.value[obj2.value] = obj1
        elif tp == 'stringtype':
            index = obj2.value
            obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:]

    def ps_get(self):
        obj1 = self.pop()
        if obj1.value == "Encoding":
            # Debugging hook; no effect.
            pass
        obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
        tp = obj2.type
        if tp in ('arraytype', 'proceduretype'):
            self.push(obj2.value[obj1.value])
        elif tp in ('dicttype', 'fonttype'):
            self.push(obj2.value[obj1.value])
        elif tp == 'stringtype':
            # string get pushes the byte's integer value.
            self.push(ps_integer(ord(obj2.value[obj1.value])))
        else:
            assert 0, "shouldn't get here"

    def ps_getinterval(self):
        # container start count getinterval -> sub-array/sub-string.
        obj1 = self.pop('integertype')
        obj2 = self.pop('integertype')
        obj3 = self.pop('arraytype', 'stringtype')
        tp = obj3.type
        if tp == 'arraytype':
            self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value]))
        elif tp == 'stringtype':
            self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value]))

    def ps_putinterval(self):
        # container start source putinterval: overwrite a slice in place.
        obj1 = self.pop('arraytype', 'stringtype')
        obj2 = self.pop('integertype')
        obj3 = self.pop('arraytype', 'stringtype')
        tp = obj3.type
        if tp == 'arraytype':
            obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value
        elif tp == 'stringtype':
            newstr = obj3.value[:obj2.value]
            newstr = newstr + obj1.value
            newstr = newstr + obj3.value[obj2.value + len(obj1.value):]
            obj3.value = newstr

    def ps_cvn(self):
        # Convert a string to an (executable) name.
        str = self.pop('stringtype')
        self.push(ps_name(str.value))

    def ps_index(self):
        # n index: copy the n-th operand (0 = top) to the top.
        n = self.pop('integertype').value
        if n < 0:
            raise RuntimeError, 'index may not be negative'
        self.push(self.stack[-1-n])

    def ps_for(self):
        # start increment limit proc for: counted loop; pushes the loop
        # variable (as real or integer) before each iteration.
        proc = self.pop('proceduretype')
        limit = self.pop('integertype', 'realtype').value
        increment = self.pop('integertype', 'realtype').value
        i = self.pop('integertype', 'realtype').value
        while 1:
            if increment > 0:
                if i > limit:
                    break
            else:
                if i < limit:
                    break
            if type(i) == type(0.0):
                self.push(ps_real(i))
            else:
                self.push(ps_integer(i))
            self.call_procedure(proc)
            i = i + increment

    def ps_forall(self):
        # Iterate a composite object, pushing element(s) then calling proc.
        proc = self.pop('proceduretype')
        obj = self.pop('arraytype', 'stringtype', 'dicttype')
        tp = obj.type
        if tp == 'arraytype':
            for item in obj.value:
                self.push(item)
                self.call_procedure(proc)
        elif tp == 'stringtype':
            for item in obj.value:
                self.push(ps_integer(ord(item)))
                self.call_procedure(proc)
        elif tp == 'dicttype':
            for key, value in obj.value.items():
                self.push(ps_name(key))
                self.push(value)
                self.call_procedure(proc)

    # --- font operators ---

    def ps_definefont(self):
        # key dict definefont: wrap the dict as a font, register it in
        # FontDirectory and push it.
        font = self.pop('dicttype')
        name = self.pop()
        font = ps_font(font.value)
        self.dictstack[0]['FontDirectory'].value[name.value] = font
        self.push(font)

    def ps_findfont(self):
        name = self.pop()
        font = self.dictstack[0]['FontDirectory'].value[name.value]
        self.push(font)

    def ps_pop(self):
        self.pop()

    def ps_dict(self):
        # n dict: the capacity hint is ignored; Python dicts grow as needed.
        num = self.pop('integertype')
        dict = ps_dict({})
        self.push(dict)

    def ps_begin(self):
        dict = self.pop('dicttype')
        self.dictstack.append(dict.value)

    def ps_end(self):
        # systemdict and userdict (the bottom two) are never popped.
        if len(self.dictstack) > 2:
            del self.dictstack[-1]
        else:
            raise RuntimeError, 'dictstack underflow'
|
||||||
|
notdef = '.notdef'
|
||||||
|
StandardEncoding = [ notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent',
|
||||||
|
'ampersand', 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
|
||||||
|
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
|
||||||
|
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', 'greater', 'question',
|
||||||
|
'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
|
||||||
|
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
|
||||||
|
'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
|
||||||
|
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
|
||||||
|
'braceleft', 'bar', 'braceright', 'asciitilde', notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, 'exclamdown',
|
||||||
|
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', 'currency', 'quotesingle',
|
||||||
|
'quotedblleft', 'guillemotleft', 'guilsinglleft', 'guilsinglright', 'fi', 'fl', notdef,
|
||||||
|
'endash', 'dagger', 'daggerdbl', 'periodcentered', notdef, 'paragraph', 'bullet',
|
||||||
|
'quotesinglbase', 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis',
|
||||||
|
'perthousand', notdef, 'questiondown', notdef, 'grave', 'acute', 'circumflex',
|
||||||
|
'tilde', 'macron', 'breve', 'dotaccent', 'dieresis', notdef, 'ring', 'cedilla',
|
||||||
|
notdef, 'hungarumlaut', 'ogonek', 'caron', 'emdash', notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, notdef, notdef, notdef,
|
||||||
|
notdef, notdef, notdef, notdef, notdef, 'AE', notdef, 'ordfeminine',
|
||||||
|
notdef, notdef, notdef, notdef, 'Lslash', 'Oslash', 'OE', 'ordmasculine',
|
||||||
|
notdef, notdef, notdef, notdef, notdef, 'ae', notdef, notdef,
|
||||||
|
notdef, 'dotlessi', notdef, notdef, 'lslash', 'oslash', 'oe', 'germandbls',
|
||||||
|
notdef, notdef, notdef, notdef ]
|
||||||
|
|
||||||
|
# Wrap each encoding name as a ps_name object.  Fixed: map() returns a
# one-shot iterator on Python 3; list() makes this a real list there and
# is identical on Python 2.
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
|
||||||
|
|
347
Lib/fontTools/t1Lib.py
Normal file
347
Lib/fontTools/t1Lib.py
Normal file
@ -0,0 +1,347 @@
|
|||||||
|
"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts
|
||||||
|
|
||||||
|
Functions for reading and writing raw Type 1 data:
|
||||||
|
|
||||||
|
read(path)
|
||||||
|
reads any Type 1 font file, returns the raw data and a type indicator:
|
||||||
|
'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed
|
||||||
|
to by 'path'.
|
||||||
|
Raises an error when the file does not contain valid Type 1 data.
|
||||||
|
|
||||||
|
write(path, data, kind = 'OTHER', dohex = 0)
|
||||||
|
writes raw Type 1 data to the file pointed to by 'path'.
|
||||||
|
'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
|
||||||
|
'dohex' is a flag which determines whether the eexec encrypted
|
||||||
|
part should be written as hexadecimal or binary, but only if kind
|
||||||
|
is 'LWFN' or 'PFB'.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__author__ = "jvr"
|
||||||
|
__version__ = "1.0b2"
|
||||||
|
DEBUG = 0
|
||||||
|
|
||||||
|
import eexec
|
||||||
|
import string
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
|
||||||
|
if os.name == 'mac':
|
||||||
|
import Res
|
||||||
|
import macfs
|
||||||
|
|
||||||
|
error = 't1Lib.error'
|
||||||
|
|
||||||
|
# work in progress
|
||||||
|
|
||||||
|
|
||||||
|
class T1Font:

    """Type 1 font class.
    XXX This is work in progress! For now just use the read()
    and write() functions as described above, they are stable.
    """

    def __init__(self, path=None):
        # When given a path, load the raw font data immediately;
        # parsing into a font dict is deferred until first access.
        if path is not None:
            self.data, type = read(path)
        else:
            pass  # XXX

    def saveAs(self, path, type):
        # Fixed: the original called self.write(...), but this class
        # defines no 'write' method -- the module-level write() function
        # is clearly what was meant (self.write always raised
        # AttributeError).
        write(path, self.getData(), type)

    def getData(self):
        # Return the raw (unparsed) font data.
        return self.data

    def __getitem__(self, key):
        # Fixed: collapsed a redundant if/else that duplicated the same
        # 'return self.font[key]' in both branches; parse lazily on
        # first access, then index the parsed font.
        if not hasattr(self, "font"):
            self.parse()
        return self.font[key]

    def parse(self):
        """Parse self.data into a font dictionary and decrypt all
        charstrings and subroutines; the raw data is discarded after."""
        import psLib
        import psCharStrings
        self.font = psLib.suckfont(self.data)
        charStrings = self.font["CharStrings"]
        # lenIV is the number of random leading bytes in each encrypted
        # charstring; defaults to 4 per the Type 1 spec.
        lenIV = self.font["Private"].get("lenIV", 4)
        assert lenIV >= 0
        for glyphName, charString in charStrings.items():
            # 4330 is the standard Type 1 charstring encryption key.
            charString, R = eexec.Decrypt(charString, 4330)
            charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:])
        subrs = self.font["Private"]["Subrs"]
        for i in range(len(subrs)):
            charString, R = eexec.Decrypt(subrs[i], 4330)
            subrs[i] = psCharStrings.T1CharString(charString[lenIV:])
        del self.data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# public functions
|
||||||
|
|
||||||
|
def read(path):
|
||||||
|
"""reads any Type 1 font file, returns raw data"""
|
||||||
|
normpath = string.lower(path)
|
||||||
|
if os.name == 'mac':
|
||||||
|
fss = macfs.FSSpec(path)
|
||||||
|
creator, type = fss.GetCreatorType()
|
||||||
|
if type == 'LWFN':
|
||||||
|
return readlwfn(path), 'LWFN'
|
||||||
|
if normpath[-4:] == '.pfb':
|
||||||
|
return readpfb(path), 'PFB'
|
||||||
|
else:
|
||||||
|
return readother(path), 'OTHER'
|
||||||
|
|
||||||
|
def write(path, data, kind='OTHER', dohex=0):
|
||||||
|
asserttype1(data)
|
||||||
|
kind = string.upper(kind)
|
||||||
|
try:
|
||||||
|
os.remove(path)
|
||||||
|
except os.error:
|
||||||
|
pass
|
||||||
|
err = 1
|
||||||
|
try:
|
||||||
|
if kind == 'LWFN':
|
||||||
|
writelwfn(path, data)
|
||||||
|
elif kind == 'PFB':
|
||||||
|
writepfb(path, data)
|
||||||
|
else:
|
||||||
|
writeother(path, data, dohex)
|
||||||
|
err = 0
|
||||||
|
finally:
|
||||||
|
if err and not DEBUG:
|
||||||
|
try:
|
||||||
|
os.remove(path)
|
||||||
|
except os.error:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# -- internal --
|
||||||
|
|
||||||
|
LWFNCHUNKSIZE = 2000
|
||||||
|
HEXLINELENGTH = 80
|
||||||
|
|
||||||
|
|
||||||
|
def readlwfn(path):
    """reads an LWFN font file, returns raw data"""
    # LWFN fonts store the Type 1 program in 'POST' resources whose IDs
    # start at 501; each resource begins with a 2-byte header: a chunk
    # code and a zero byte.
    resref = Res.OpenResFile(path)
    try:
        Res.UseResFile(resref)
        n = Res.Count1Resources('POST')
        data = []
        for i in range(501, 501 + n):
            res = Res.Get1Resource('POST', i)
            code = ord(res.data[0])
            if ord(res.data[1]) <> 0:
                raise error, 'corrupt LWFN file'
            if code in [1, 2]:
                # 1 = plain text, 2 = encrypted binary
                data.append(res.data[2:])
            elif code in [3, 5]:
                # end-of-file / end-of-data markers
                break
            elif code == 4:
                # the remaining data lives in the data fork
                f = open(path, "rb")
                data.append(f.read())
                f.close()
            elif code == 0:
                pass # comment, ignore
            else:
                raise error, 'bad chunk code: ' + `code`
    finally:
        Res.CloseResFile(resref)
    data = string.join(data, '')
    asserttype1(data)
    return data
|
||||||
|
|
||||||
|
def readpfb(path):
    """reads a PFB font file, returns raw data"""
    # A PFB file is a sequence of chunks, each preceded by a 6-byte
    # header: a 128 marker byte, a code byte and a 4-byte length.
    f = open(path, "rb")
    data = []
    while 1:
        if f.read(1) <> chr(128):
            raise error, 'corrupt PFB file'
        code = ord(f.read(1))
        if code in [1, 2]:
            # 1 = plain text, 2 = encrypted binary
            chunklen = string2long(f.read(4))
            data.append(f.read(chunklen))
        elif code == 3:
            # end-of-file chunk
            break
        else:
            raise error, 'bad chunk code: ' + `code`
    f.close()
    data = string.join(data, '')
    asserttype1(data)
    return data
|
||||||
|
|
||||||
|
def readother(path):
    """reads any (font) file, returns raw data"""
    f = open(path, "rb")
    data = f.read()
    f.close()
    asserttype1(data)

    # normalize: decode hex-encoded eexec parts to binary
    chunks = findencryptedchunks(data)
    data = []
    for isencrypted, chunk in chunks:
        if isencrypted and ishex(chunk[:4]):
            data.append(dehexstring(chunk))
        else:
            data.append(chunk)
    return string.join(data, '')
|
||||||
|
|
||||||
|
# file writing tools

def writelwfn(path, data):
    """Write 'data' to 'path' as an LWFN (resource fork) font file."""
    Res.CreateResFile(path)
    fss = macfs.FSSpec(path)
    fss.SetCreatorType('just', 'LWFN')
    resref = Res.OpenResFile(path)
    try:
        Res.UseResFile(resref)
        resID = 501   # 'POST' resource IDs start at 501
        chunks = findencryptedchunks(data)
        for isencrypted, chunk in chunks:
            if isencrypted:
                code = 2
            else:
                code = 1
            # split the chunk over as many resources as needed;
            # each has a 2-byte (code, zero) header
            while chunk:
                res = Res.Resource(chr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
                res.AddResource('POST', resID, '')
                chunk = chunk[LWFNCHUNKSIZE - 2:]
                resID = resID + 1
        # code 5 marks the end of the data
        res = Res.Resource(chr(5) + '\0')
        res.AddResource('POST', resID, '')
    finally:
        Res.CloseResFile(resref)
|
||||||
|
|
||||||
|
def writepfb(path, data):
    """Write 'data' to 'path' in PFB (printer font binary) format.

    Each chunk is preceded by a 6-byte header: a 128 marker byte, a
    chunk code (1 = plain text, 2 = encrypted binary) and a 4-byte
    little-endian length. A final (128, 3) header marks end-of-file.
    """
    chunks = findencryptedchunks(data)
    # BUGFIX: this previously opened 'dstpath', a name that does not
    # exist in this scope (NameError); the parameter is called 'path'.
    f = open(path, "wb")
    try:
        for isencrypted, chunk in chunks:
            if isencrypted:
                code = 2
            else:
                code = 1
            f.write(chr(128) + chr(code))
            f.write(long2string(len(chunk)))
            f.write(chunk)
        # end-of-file marker
        f.write(chr(128) + chr(3))
    finally:
        f.close()
    if os.name == 'mac':
        # same fix: was macfs.FSSpec(dstpath)
        fss = macfs.FSSpec(path)
        fss.SetCreatorType('mdos', 'BINA')
|
||||||
|
|
||||||
|
def writeother(path, data, dohex = 0):
    """Write 'data' to 'path' as a flat PostScript file.

    If 'dohex' is true, encrypted chunks are written as hex text,
    HEXLINELENGTH characters per line; otherwise they are written
    as-is (binary).
    """
    chunks = findencryptedchunks(data)
    f = open(path, "wb")
    try:
        hexlinelen = HEXLINELENGTH / 2   # input bytes per hex line
        for isencrypted, chunk in chunks:
            if isencrypted:
                code = 2
            else:
                code = 1
            if code == 2 and dohex:
                while chunk:
                    f.write(eexec.hexstring(chunk[:hexlinelen]))
                    f.write('\r')   # classic Mac line ending
                    chunk = chunk[hexlinelen:]
            else:
                f.write(chunk)
    finally:
        f.close()
    if os.name == 'mac':
        fss = macfs.FSSpec(path)
        fss.SetCreatorType('R*ch', 'TEXT') # BBEdit text file
|
||||||
|
|
||||||
|
|
||||||
|
# decryption tools

# Marks the start of the encrypted portion of a Type 1 font program.
EEXECBEGIN = "currentfile eexec"
# A run of 64 zeros terminates the encrypted portion.
EEXECEND = '0' * 64
# The decrypted program text itself must end with this operator.
EEXECINTERNALEND = "currentfile closefile"
# Markers used by decrypttype1() to bracket decrypted parts.
EEXECBEGINMARKER = "%-- eexec start\r"
EEXECENDMARKER = "%-- eexec end\r"
|
||||||
|
|
||||||
|
# matches a (possibly empty) run of hex digits spanning the whole string
_ishexRE = re.compile('[0-9A-Fa-f]*$')


def ishex(text):
    """Return true if 'text' consists entirely of hex digits."""
    m = _ishexRE.match(text)
    return m is not None
|
||||||
|
|
||||||
|
|
||||||
|
def decrypttype1(data):
    """Decrypt the eexec parts of a Type 1 font program.

    Each encrypted chunk is replaced by its decrypted program text,
    bracketed by EEXECBEGINMARKER/EEXECENDMARKER.
    """
    chunks = findencryptedchunks(data)
    data = []
    for isencrypted, chunk in chunks:
        if isencrypted:
            if ishex(chunk[:4]):
                # hex-encoded: decode to binary first
                chunk = dehexstring(chunk)
            # 55665 is the eexec encryption key (Type 1 spec)
            decrypted, R = eexec.Decrypt(chunk, 55665)
            # throw away the first four (random) bytes
            decrypted = decrypted[4:]
            # the decrypted text must end with "currentfile closefile"
            # followed by one or two line-end characters
            if decrypted[-len(EEXECINTERNALEND)-1:-1] <> EEXECINTERNALEND \
                    and decrypted[-len(EEXECINTERNALEND)-2:-2] <> EEXECINTERNALEND:
                raise error, "invalid end of eexec part"
            decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r'
            data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
        else:
            if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
                # strip the "currentfile eexec" line itself
                data.append(chunk[:-len(EEXECBEGIN)-1])
            else:
                data.append(chunk)
    return string.join(data, '')
|
||||||
|
|
||||||
|
def findencryptedchunks(data):
    """Split 'data' into a list of (isencrypted, chunk) tuples.

    Text between "currentfile eexec" and the run of zeros that
    terminates it is flagged as encrypted (isencrypted == 1).
    """
    chunks = []
    while 1:
        ebegin = string.find(data, EEXECBEGIN)
        if ebegin < 0:
            break
        eend = string.find(data, EEXECEND, ebegin)
        if eend < 0:
            raise error, "can't find end of eexec part"
        # plain text up to and including the eexec line terminator
        chunks.append((0, data[:ebegin + len(EEXECBEGIN) + 1]))
        # the encrypted part proper
        chunks.append((1, data[ebegin + len(EEXECBEGIN) + 1:eend]))
        data = data[eend:]
    chunks.append((0, data))
    return chunks
|
||||||
|
|
||||||
|
def dehexstring(hexstring):
    """Decode a hex string to binary, ignoring any embedded whitespace."""
    return eexec.dehexstring(string.join(string.split(hexstring), ""))
|
||||||
|
|
||||||
|
|
||||||
|
# Type 1 assertion

# matches the "/FontType 1 def" declaration in a Type 1 program
_fontType1RE = re.compile(r"/FontType\s+1\s+def")


def asserttype1(data):
    """Raise 'error' unless 'data' is an encrypted Type 1 font program.

    Returns 'data' unchanged on success.
    """
    # the program must start with one of the known magic headers
    for head in ['%!PS-AdobeFont', '%!FontType1-1.0']:
        if data[:len(head)] == head:
            break
    else:
        raise error, "not a PostScript font"
    if not _fontType1RE.search(data):
        raise error, "not a Type 1 font"
    if string.find(data, "currentfile eexec") < 0:
        raise error, "not an encrypted Type 1 font"
    # XXX what else?
    return data
|
||||||
|
|
||||||
|
|
||||||
|
# pfb helpers

def long2string(long):
    """Pack the integer 'long' into a 4-byte little-endian string."""
    packed = ""
    for shift in (0, 8, 16, 24):
        packed = packed + chr((long >> shift) & 0xff)
    return packed
|
||||||
|
|
||||||
|
def string2long(str):
    """Unpack a 4-byte little-endian string into an integer.

    Raises ValueError if 'str' is not exactly 4 bytes long.
    """
    # use '!=' and the call form of raise: equivalent here, but valid
    # under both old and modern Python ('<>' and 'raise E, msg' are not)
    if len(str) != 4:
        raise ValueError('string must be 4 bytes long')
    value = 0
    for i in range(4):
        value = value + (ord(str[i]) << (i * 8))
    return value
|
555
Lib/fontTools/ttLib/__init__.py
Normal file
555
Lib/fontTools/ttLib/__init__.py
Normal file
@ -0,0 +1,555 @@
|
|||||||
|
"""ttLib -- a package for dealing with TrueType fonts.
|
||||||
|
|
||||||
|
This package offers translators to convert TrueType fonts to Python
|
||||||
|
objects and vice versa, and additionally from Python to XML and vice versa.
|
||||||
|
|
||||||
|
Example interactive session:
|
||||||
|
|
||||||
|
Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL]
|
||||||
|
Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam
|
||||||
|
>>> from fontTools import ttLib
|
||||||
|
>>> tt = ttLib.TTFont("afont.ttf")
|
||||||
|
>>> tt['maxp'].numGlyphs
|
||||||
|
242
|
||||||
|
>>> tt['OS/2'].achVendID
|
||||||
|
'B&H\000'
|
||||||
|
>>> tt['head'].unitsPerEm
|
||||||
|
2048
|
||||||
|
>>> tt.saveXML("afont.xml")
|
||||||
|
Dumping 'LTSH' table...
|
||||||
|
Dumping 'OS/2' table...
|
||||||
|
Dumping 'VDMX' table...
|
||||||
|
Dumping 'cmap' table...
|
||||||
|
Dumping 'cvt ' table...
|
||||||
|
Dumping 'fpgm' table...
|
||||||
|
Dumping 'glyf' table...
|
||||||
|
Dumping 'hdmx' table...
|
||||||
|
Dumping 'head' table...
|
||||||
|
Dumping 'hhea' table...
|
||||||
|
Dumping 'hmtx' table...
|
||||||
|
Dumping 'loca' table...
|
||||||
|
Dumping 'maxp' table...
|
||||||
|
Dumping 'name' table...
|
||||||
|
Dumping 'post' table...
|
||||||
|
Dumping 'prep' table...
|
||||||
|
>>> tt2 = ttLib.TTFont()
|
||||||
|
>>> tt2.importXML("afont.xml")
|
||||||
|
>>> tt2['maxp'].numGlyphs
|
||||||
|
242
|
||||||
|
>>>
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
__author__ = "Just van Rossum, just@letterror.com"
|
||||||
|
__version__ = "1.0a5"
|
||||||
|
|
||||||
|
|
||||||
|
import os
|
||||||
|
import stat
|
||||||
|
import types
|
||||||
|
|
||||||
|
class TTLibError(Exception):
    """Base exception for the ttLib package."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class TTFont:
|
||||||
|
|
||||||
|
"""The main font object. It manages file input and output, and offers
|
||||||
|
a convenient way of accessing tables.
|
||||||
|
Tables will be only decompiled when neccesary, ie. when they're actually
|
||||||
|
accessed. This means that simple operations can be extremely fast.
|
||||||
|
"""
|
||||||
|
|
||||||
|
    def __init__(self, file=None, res_name_or_index=None,
            sfntVersion="\000\001\000\000", checkchecksums=0, verbose=0):

        """The constructor can be called with a few different arguments.
        When reading a font from disk, 'file' should be either a pathname
        pointing to a file, or a readable file object.

        If we're running on a Macintosh, 'res_name_or_index' may be an sfnt
        resource name or an sfnt resource index number or zero. The latter
        case will cause TTLib to autodetect whether the file is a flat file
        or a suitcase. (If it's a suitcase, only the first 'sfnt' resource
        will be read!)

        The 'checkchecksums' argument is used to specify how sfnt
        checksums are treated upon reading a file from disk:
        0: don't check (default)
        1: check, print warnings if a wrong checksum is found
        2: check, raise an exception if a wrong checksum is found.

        The TTFont constructor can also be called without a 'file'
        argument: this is the way to create a new empty font.
        In this case you can optionally supply the 'sfntVersion' argument.
        """

        import sfnt
        self.verbose = verbose
        self.tables = {}    # tag -> decompiled table object
        self.reader = None  # sfnt.SFNTReader, or None for a new font
        if not file:
            # new, empty font
            self.sfntVersion = sfntVersion
            return
        if type(file) == types.StringType:
            if os.name == "mac" and res_name_or_index is not None:
                # on the mac, we deal with sfnt resources as well as flat files
                import macUtils
                if res_name_or_index == 0:
                    # autodetect: suitcase or flat file?
                    if macUtils.getSFNTResIndices(file):
                        # get the first available sfnt font.
                        file = macUtils.SFNTResourceReader(file, 1)
                    else:
                        file = open(file, "rb")
                else:
                    file = macUtils.SFNTResourceReader(file, res_name_or_index)
            else:
                file = open(file, "rb")
        else:
            pass # assume "file" is a readable file object
        self.reader = sfnt.SFNTReader(file, checkchecksums)
        self.sfntVersion = self.reader.sfntVersion
|
||||||
|
|
||||||
|
    def close(self):
        """If we still have a reader object, close it."""
        if self.reader is not None:
            self.reader.close()
|
||||||
|
|
||||||
|
    def save(self, file, make_suitcase=0):
        """Save the font to disk. Similarly to the constructor,
        the 'file' argument can be either a pathname or a writable
        file object.

        On the Mac, if make_suitcase is non-zero, a suitcase file will
        be made instead of a flat .ttf file.
        """
        import sfnt
        if type(file) == types.StringType:
            if os.name == "mac" and make_suitcase:
                import macUtils
                file = macUtils.SFNTResourceWriter(file, self)
            else:
                file = open(file, "wb")
                if os.name == "mac":
                    # give the flat file a sensible creator/type
                    import macfs
                    fss = macfs.FSSpec(file.name)
                    fss.SetCreatorType('mdos', 'BINA')
        else:
            pass # assume "file" is a writable file object

        tags = self.keys()
        numTables = len(tags)
        writer = sfnt.SFNTWriter(file, numTables, self.sfntVersion)

        # 'done' tracks already-written tables so that inter-table
        # dependencies are written exactly once (see _writeTable())
        done = []
        for tag in tags:
            self._writeTable(tag, writer, done)

        writer.close()
|
||||||
|
|
||||||
|
    def saveXML(self, file, progress=None, tables=None):
        """Export the font as an XML-based text file.

        'file' is a pathname or writable file object. 'tables' may be a
        list of table tags to dump; by default all tables are dumped.
        'progress' is an optional progress bar object.
        """
        import xmlWriter
        writer = xmlWriter.XMLWriter(file)
        # `...`[1:-1]: repr of the version string, minus the quotes
        writer.begintag("ttFont", sfntVersion=`self.sfntVersion`[1:-1],
                ttlibVersion=__version__)
        writer.newline()
        writer.newline()
        if not tables:
            tables = self.keys()
        numTables = len(tables)
        # progress is measured in "glyph" units: numGlyphs per table
        numGlyphs = self['maxp'].numGlyphs
        if progress:
            progress.set(0, numTables * numGlyphs)
        for i in range(numTables):
            tag = tables[i]
            table = self[tag]
            report = "Dumping '%s' table..." % tag
            if progress:
                progress.setlabel(report)
            elif self.verbose:
                debugmsg(report)
            else:
                print report
            xmltag = tag2xmltag(tag)
            writer.begintag(xmltag)
            writer.newline()
            # glyf and CFF can report per-glyph progress
            if tag == "glyf":
                table.toXML(writer, self, progress)
            elif tag == "CFF ":
                table.toXML(writer, self, progress)
            else:
                table.toXML(writer, self)
            writer.endtag(xmltag)
            writer.newline()
            writer.newline()
            if progress:
                progress.set(i * numGlyphs, numTables * numGlyphs)
        writer.endtag("ttFont")
        writer.newline()
        writer.close()
        if self.verbose:
            debugmsg("Done dumping XML")
|
||||||
|
|
||||||
|
def importXML(self, file, progress=None):
|
||||||
|
"""Import an XML-based text file, so as to recreate
|
||||||
|
a font object.
|
||||||
|
"""
|
||||||
|
if self.tables:
|
||||||
|
raise error, "Can't import XML into existing font."
|
||||||
|
import xmlImport
|
||||||
|
from xml.parsers.xmlproc import xmlproc
|
||||||
|
builder = xmlImport.XMLApplication(self, progress)
|
||||||
|
if progress:
|
||||||
|
progress.set(0, os.stat(file)[stat.ST_SIZE] / 100 or 1)
|
||||||
|
proc = xmlImport.UnicodeProcessor()
|
||||||
|
proc.set_application(builder)
|
||||||
|
proc.set_error_handler(xmlImport.XMLErrorHandler(proc))
|
||||||
|
dir, filename = os.path.split(file)
|
||||||
|
if dir:
|
||||||
|
olddir = os.getcwd()
|
||||||
|
os.chdir(dir)
|
||||||
|
try:
|
||||||
|
proc.parse_resource(filename)
|
||||||
|
root = builder.root
|
||||||
|
finally:
|
||||||
|
if dir:
|
||||||
|
os.chdir(olddir)
|
||||||
|
# remove circular references
|
||||||
|
proc.deref()
|
||||||
|
del builder.progress
|
||||||
|
|
||||||
|
    def isLoaded(self, tag):
        """Return true if the table identified by 'tag' has been
        decompiled and loaded into memory."""
        return self.tables.has_key(tag)
|
||||||
|
|
||||||
|
    def has_key(self, tag):
        """Pretend we're a dictionary."""
        # a table is "present" if it is loaded, or still available
        # (not yet decompiled) in the reader
        if self.isLoaded(tag):
            return 1
        elif self.reader and self.reader.has_key(tag):
            return 1
        else:
            return 0
|
||||||
|
|
||||||
|
    def keys(self):
        """Pretend we're a dictionary."""
        # merge loaded table tags with those still in the reader
        keys = self.tables.keys()
        if self.reader:
            for key in self.reader.keys():
                if key not in keys:
                    keys.append(key)
        keys.sort()
        return keys
|
||||||
|
|
||||||
|
    def __len__(self):
        """Pretend we're a dictionary."""
        # count both loaded and not-yet-decompiled tables
        return len(self.keys())
|
||||||
|
|
||||||
|
    def __getitem__(self, tag):
        """Pretend we're a dictionary."""
        # tables are decompiled lazily, on first access
        try:
            return self.tables[tag]
        except KeyError:
            if self.reader is not None:
                if self.verbose:
                    debugmsg("reading '%s' table from disk" % tag)
                data = self.reader[tag]
                tableclass = getTableClass(tag)
                table = tableclass(tag)
                # NOTE(review): the table is registered *before*
                # decompile(), apparently so tables that look up other
                # tables via 'self' during decompilation can find it.
                self.tables[tag] = table
                if self.verbose:
                    debugmsg("decompiling '%s' table" % tag)
                table.decompile(data, self)
                return table
            else:
                raise KeyError, "'%s' table not found" % tag
|
||||||
|
|
||||||
|
    def __setitem__(self, tag, table):
        """Pretend we're a dictionary."""
        self.tables[tag] = table
|
||||||
|
|
||||||
|
    def __delitem__(self, tag):
        """Pretend we're a dictionary."""
        # only removes the loaded table object; raises KeyError if the
        # table was never loaded
        del self.tables[tag]
|
||||||
|
|
||||||
|
    def setGlyphOrder(self, glyphOrder):
        """Set the glyph order (list of glyph names) for this font, and
        propagate it to the outline tables, if present."""
        self.glyphOrder = glyphOrder
        if self.has_key('CFF '):
            self['CFF '].setGlyphOrder(glyphOrder)
        if self.has_key('glyf'):
            self['glyf'].setGlyphOrder(glyphOrder)
|
||||||
|
|
||||||
|
    def getGlyphOrder(self):
        """Return the list of glyph names, in font (glyph ID) order.

        Computed on first use: from the CFF table for CFF OpenType
        fonts, otherwise from 'post' (falling back to the unicode cmap
        when 'post' carries no names).
        """
        if not hasattr(self, "glyphOrder"):
            if self.has_key('CFF '):
                # CFF OpenType font
                self.glyphOrder = self['CFF '].getGlyphOrder()
            else:
                # TrueType font
                glyphOrder = self['post'].getGlyphOrder()
                if glyphOrder is None:
                    #
                    # No names found in the 'post' table.
                    # Try to create glyph names from the unicode cmap (if available)
                    # in combination with the Adobe Glyph List (AGL).
                    #
                    self._getGlyphNamesFromCmap()
                else:
                    self.glyphOrder = glyphOrder
            # XXX what if a font contains 'glyf'/'post' table *and* CFF?
        return self.glyphOrder
|
||||||
|
|
||||||
|
    def _getGlyphNamesFromCmap(self):
        """Invent glyph names from the (3,1) unicode cmap plus the
        Adobe Glyph List, and install them as self.glyphOrder."""
        # Make up glyph names based on glyphID, which will be used
        # in case we don't find a unicode cmap.
        numGlyphs = int(self['maxp'].numGlyphs)
        glyphOrder = [None] * numGlyphs
        glyphOrder[0] = ".notdef"
        for i in range(1, numGlyphs):
            glyphOrder[i] = "glyph%.5d" % i
        # Set the glyph order, so the cmap parser has something
        # to work with
        self.glyphOrder = glyphOrder
        # Get the temporary cmap (based on the just invented names)
        tempcmap = self['cmap'].getcmap(3, 1)
        if tempcmap is not None:
            # we have a unicode cmap
            import agl, string
            cmap = tempcmap.cmap
            # create a reverse cmap dict
            reversecmap = {}
            for unicode, name in cmap.items():
                reversecmap[name] = unicode
            # sanity check: the temporary names are unique, so the
            # mapping must be invertible
            assert len(reversecmap) == len(cmap)
            for i in range(numGlyphs):
                tempName = glyphOrder[i]
                if reversecmap.has_key(tempName):
                    unicode = reversecmap[tempName]
                    if agl.UV2AGL.has_key(unicode):
                        # get name from the Adobe Glyph List
                        glyphOrder[i] = agl.UV2AGL[unicode]
                    else:
                        # create uni<CODE> name
                        glyphOrder[i] = "uni" + string.upper(string.zfill(hex(unicode)[2:], 4))
            # Delete the cmap table from the cache, so it can be
            # parsed again with the right names.
            del self.tables['cmap']
        else:
            pass # no unicode cmap available, stick with the invented names
        self.glyphOrder = glyphOrder
|
||||||
|
|
||||||
|
    def getGlyphNames(self):
        """Get a list of glyph names, sorted alphabetically."""
        # copy before sorting so the glyph order itself stays intact
        glyphNames = self.getGlyphOrder()[:]
        glyphNames.sort()
        return glyphNames
|
||||||
|
|
||||||
|
    def getGlyphNames2(self):
        """Get a list of glyph names, sorted alphabetically, but not case sensitive."""
        from fontTools.misc import textTools
        return textTools.caselessSort(self.getGlyphOrder())
|
||||||
|
|
||||||
|
    def getGlyphName(self, glyphID):
        """Return the glyph name for glyph index 'glyphID'."""
        return self.getGlyphOrder()[glyphID]
|
||||||
|
|
||||||
|
    def getGlyphID(self, glyphName):
        """Return the glyph index for 'glyphName'; raises KeyError if
        the name is not in the glyph order."""
        if not hasattr(self, "_reverseGlyphOrderDict"):
            self._buildReverseGlyphOrderDict()
        glyphOrder = self.getGlyphOrder()
        d = self._reverseGlyphOrderDict
        if not d.has_key(glyphName):
            if glyphName in glyphOrder:
                # glyph order changed since the dict was built:
                # rebuild it and retry
                self._buildReverseGlyphOrderDict()
                return self.getGlyphID(glyphName)
            else:
                raise KeyError, glyphName
        glyphID = d[glyphName]
        if glyphName <> glyphOrder[glyphID]:
            # stale cache entry: rebuild and retry
            self._buildReverseGlyphOrderDict()
            return self.getGlyphID(glyphName)
        return glyphID
|
||||||
|
|
||||||
|
    def _buildReverseGlyphOrderDict(self):
        """(Re)build the name -> glyphID lookup dict from the glyph order."""
        self._reverseGlyphOrderDict = d = {}
        glyphOrder = self.getGlyphOrder()
        for glyphID in range(len(glyphOrder)):
            d[glyphOrder[glyphID]] = glyphID
|
||||||
|
|
||||||
|
    def _writeTable(self, tag, writer, done):
        """Internal helper function for self.save(). Keeps track of
        inter-table dependencies.

        'done' is the (mutated) list of tags already written; a table's
        dependencies are written before the table itself.
        """
        if tag in done:
            return
        tableclass = getTableClass(tag)
        for masterTable in tableclass.dependencies:
            if masterTable not in done:
                if self.has_key(masterTable):
                    self._writeTable(masterTable, writer, done)
                else:
                    # absent dependency: mark as handled so we don't
                    # keep checking for it
                    done.append(masterTable)
        tabledata = self._getTableData(tag)
        if self.verbose:
            debugmsg("writing '%s' table to disk" % tag)
        writer[tag] = tabledata
        done.append(tag)
|
||||||
|
|
||||||
|
    def _getTableData(self, tag):
        """Internal helper function. Returns raw table data,
        whether compiled or directly read from disk.
        """
        if self.isLoaded(tag):
            # loaded (possibly modified) table: recompile it
            if self.verbose:
                debugmsg("compiling '%s' table" % tag)
            return self.tables[tag].compile(self)
        elif self.reader and self.reader.has_key(tag):
            # untouched table: pass the raw bytes through
            if self.verbose:
                debugmsg("reading '%s' table from disk" % tag)
            return self.reader[tag]
        else:
            raise KeyError, tag
|
||||||
|
|
||||||
|
|
||||||
|
def _test_endianness():
    """Test the endianness of the machine. This is crucial to know
    since TrueType data is always big endian, even on little endian
    machines. There are quite a few situations where we explicitly
    need to swap some bytes.

    Returns "big" or "little".
    """
    import struct
    # "h" packs a short in *native* byte order
    data = struct.pack("h", 0x01)
    if data == "\000\001":
        return "big"
    elif data == "\001\000":
        return "little"
    else:
        assert 0, "endian confusion!"

# byte order of the machine we are running on: "big" or "little"
endian = _test_endianness()
|
||||||
|
|
||||||
|
|
||||||
|
def getTableModule(tag):
    """Fetch the packer/unpacker module for a table.
    Return None when no module is found.
    """
    import imp
    import tables
    py_tag = tag2identifier(tag)
    try:
        # probe for the module without actually importing it
        f, path, kind = imp.find_module(py_tag, tables.__path__)
        if f:
            f.close()
    except ImportError:
        return None
    else:
        # __import__ returns the top-level package; fetch the
        # submodule as an attribute of the tables package
        module = __import__("fontTools.ttLib.tables." + py_tag)
        return getattr(tables, py_tag)
|
||||||
|
|
||||||
|
|
||||||
|
def getTableClass(tag):
    """Fetch the packer/unpacker class for a table.
    Return None when no class is found.
    """
    module = getTableModule(tag)
    if module is None:
        # no dedicated module: fall back to the generic table class
        from tables.DefaultTable import DefaultTable
        return DefaultTable
    py_tag = tag2identifier(tag)
    # by convention the class is named "table_" + escaped tag
    tableclass = getattr(module, "table_" + py_tag)
    return tableclass
|
||||||
|
|
||||||
|
|
||||||
|
def newtable(tag):
    """Return a new instance of a table."""
    tableclass = getTableClass(tag)
    return tableclass(tag)
|
||||||
|
|
||||||
|
|
||||||
|
def _escapechar(c):
|
||||||
|
"""Helper function for tag2identifier()"""
|
||||||
|
import re
|
||||||
|
if re.match("[a-z0-9]", c):
|
||||||
|
return "_" + c
|
||||||
|
elif re.match("[A-Z]", c):
|
||||||
|
return c + "_"
|
||||||
|
else:
|
||||||
|
return hex(ord(c))[2:]
|
||||||
|
|
||||||
|
|
||||||
|
def tag2identifier(tag):
    """Convert a table tag to a valid (but UGLY) python identifier,
    as well as a filename that's guaranteed to be unique even on a
    caseless file system. Each character is mapped to two characters.
    Lowercase letters get an underscore before the letter, uppercase
    letters get an underscore after the letter. Trailing spaces are
    trimmed. Illegal characters are escaped as two hex bytes. If the
    result starts with a number (as the result of a hex escape), an
    extra underscore is prepended. Examples:
        'glyf' -> '_g_l_y_f'
        'cvt ' -> '_c_v_t'
        'OS/2' -> 'O_S_2f_2'
    """
    import re
    assert len(tag) == 4, "tag should be 4 characters long"
    # trim trailing spaces (but keep at least one character)
    while len(tag) > 1 and tag[-1] == ' ':
        tag = tag[:-1]
    ident = ""
    for c in tag:
        ident = ident + _escapechar(c)
    # identifiers can't start with a digit
    if re.match("[0-9]", ident):
        ident = "_" + ident
    return ident
|
||||||
|
|
||||||
|
|
||||||
|
def identifier2tag(ident):
    """the opposite of tag2identifier()"""
    import string
    # an odd length means a leading disambiguating underscore was added
    if len(ident) % 2 and ident[0] == "_":
        ident = ident[1:]
    assert not (len(ident) % 2)
    tag = ""
    # two identifier characters per tag character
    for i in range(0, len(ident), 2):
        if ident[i] == "_":
            # "_x" -> lowercase/digit x
            tag = tag + ident[i+1]
        elif ident[i+1] == "_":
            # "X_" -> uppercase X
            tag = tag + ident[i]
        else:
            # assume hex
            tag = tag + chr(string.atoi(ident[i:i+2], 16))
    # append trailing spaces
    tag = tag + (4 - len(tag)) * ' '
    return tag
|
||||||
|
|
||||||
|
|
||||||
|
def tag2xmltag(tag):
    """Similarly to tag2identifier(), this converts a TT tag
    to a valid XML element name. Since XML element names are
    case sensitive, this is a fairly simple/readable translation.
    """
    import string, re
    # the one tag containing a '/' gets a fixed spelling
    if tag == "OS/2":
        return "OS_2"
    # already a valid XML name (modulo trailing spaces)?
    if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
        return string.strip(tag)
    # otherwise fall back to the full escaping scheme
    return tag2identifier(tag)
|
||||||
|
|
||||||
|
|
||||||
|
def xmltag2tag(tag):
    """The opposite of tag2xmltag().

    Maps an XML element name back to the 4-character table tag,
    re-padding short names with trailing spaces.
    """
    if tag == "OS_2":
        return "OS/2"
    if len(tag) == 8:
        # a fully escaped name: two characters per tag byte
        return identifier2tag(tag)
    else:
        # pad with spaces back up to four characters
        return tag + " " * (4 - len(tag))
    # (a trailing unreachable 'return tag' was removed: both branches
    # above already return)
|
||||||
|
|
||||||
|
|
||||||
|
def debugmsg(msg):
    """Print 'msg' to stdout with a " (HH:MM:SS)" timestamp appended."""
    import time
    print msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))
|
||||||
|
|
||||||
|
|
212
Lib/fontTools/ttLib/macUtils.py
Normal file
212
Lib/fontTools/ttLib/macUtils.py
Normal file
@ -0,0 +1,212 @@
|
|||||||
|
"""ttLib.macUtils.py -- Various Mac-specific stuff."""
|
||||||
|
|
||||||
|
|
||||||
|
import os
|
||||||
|
if os.name <> "mac":
|
||||||
|
raise ImportError, "This module is Mac-only!"
|
||||||
|
|
||||||
|
import Res, macfs
|
||||||
|
import cStringIO
|
||||||
|
|
||||||
|
|
||||||
|
def getSFNTResIndices(path):
    """Determine whether a file has a resource fork or not.

    Returns the list of 'sfnt' resource indices (1-based); an empty
    list means no resource fork / no sfnt resources.
    """
    fss = macfs.FSSpec(path)
    try:
        resref = Res.FSpOpenResFile(fss, 1)  # read only
    except Res.Error:
        # no resource fork
        return []
    Res.UseResFile(resref)
    numSFNTs = Res.Count1Resources('sfnt')
    Res.CloseResFile(resref)
    return range(1, numSFNTs + 1)
|
||||||
|
|
||||||
|
|
||||||
|
def openTTFonts(path):
    """Given a pathname, return a list of TTFont objects. In the case
    of a flat TTF/OTF file, the list will contain just one font object;
    but in the case of a Mac font suitcase it will contain as many
    font objects as there are sfnt resources in the file.
    """
    from fontTools import ttLib
    fonts = []
    sfnts = getSFNTResIndices(path)
    if not sfnts:
        # flat file
        fonts.append(ttLib.TTFont(path))
    else:
        # suitcase: one font per sfnt resource
        for index in sfnts:
            fonts.append(ttLib.TTFont(path, index))
        if not fonts:
            raise ttLib.TTLibError, "no fonts found in file '%s'" % path
    return fonts
|
||||||
|
|
||||||
|
|
||||||
|
class ProgressBar:

    """Progress bar dialog; thin wrapper around EasyDialogs.ProgressBar."""

    def __init__(self, title, maxval=100):
        import EasyDialogs
        self.bar = EasyDialogs.ProgressBar(title, maxval=maxval)

    def set(self, val, maxval=None):
        # allow the maximum to be changed on the fly
        if maxval <> None:
            self.bar.maxval = maxval
        self.bar.set(val)

    def increment(self, val=1):
        self.bar.inc(val)

    def setlabel(self, text):
        self.bar.label(text)

    def close(self):
        # hide the dialog window, then drop the bar object
        self.bar.d.HideWindow()
        del self.bar
|
||||||
|
|
||||||
|
|
||||||
|
class SFNTResourceReader:

    """Simple (Mac-only) read-only file wrapper for 'sfnt' resources."""

    def __init__(self, path, res_name_or_index):
        # 'res_name_or_index': a string selects a named resource,
        # anything else is treated as a 1-based resource index
        fss = macfs.FSSpec(path)
        resref = Res.FSpOpenResFile(fss, 1)  # read-only
        Res.UseResFile(resref)
        if type(res_name_or_index) == type(""):
            res = Res.Get1NamedResource('sfnt', res_name_or_index)
        else:
            res = Res.Get1IndResource('sfnt', res_name_or_index)
        # copy the resource data into an in-memory file, so the
        # resource file can be closed right away
        self.file = cStringIO.StringIO(res.data)
        Res.CloseResFile(resref)
        self.name = path

    def __getattr__(self, attr):
        # cheap inheritance: delegate everything else to the StringIO
        return getattr(self.file, attr)
|
||||||
|
|
||||||
|
|
||||||
|
class SFNTResourceWriter:
	
	"""Simple (Mac-only) file wrapper for 'sfnt' resources.
	
	Buffers the sfnt data in memory; close() writes it as an 'sfnt'
	resource plus a matching 'FOND' resource to the file at 'path'.
	"""
	
	def __init__(self, path, ttFont, res_id=None):
		self.file = cStringIO.StringIO()
		self.name = path
		self.closed = 0
		fullname = ttFont['name'].getname(4, 1, 0)    # Full name, mac, default encoding
		familyname = ttFont['name'].getname(1, 1, 0)  # Fam. name, mac, default encoding
		psname = ttFont['name'].getname(6, 1, 0)      # PostScript name, etc.
		# Fixed: the original tested 'fullname' twice and never checked
		# 'familyname', so a missing family name crashed below instead
		# of raising the intended error.
		if fullname is None or familyname is None or psname is None:
			from fontTools import ttLib
			raise ttLib.TTLibError("can't make 'sfnt' resource, no Macintosh 'name' table found")
		self.fullname = fullname.string
		self.familyname = familyname.string
		self.psname = psname.string
		if self.familyname != self.psname[:len(self.familyname)]:
			# ugh. force fam name to be the same as first part of ps name,
			# fondLib otherwise barfs.
			for i in range(min(len(self.psname), len(self.familyname))):
				if self.familyname[i] != self.psname[i]:
					break
			self.familyname = self.psname[:i]
		
		self.ttFont = ttFont
		self.res_id = res_id
		fss = macfs.FSSpec(self.name)
		# Start from a fresh resource file.
		if os.path.exists(self.name):
			os.remove(self.name)
		Res.FSpCreateResFile(fss, 'DMOV', 'FFIL', 0)
		self.resref = Res.FSpOpenResFile(fss, 3)  # exclusive read/write permission
	
	def close(self):
		"""Flush the buffered sfnt data to the resource file (idempotent)."""
		if self.closed:
			return
		Res.UseResFile(self.resref)
		# Replace any pre-existing 'sfnt' resource of the same name.
		try:
			res = Res.Get1NamedResource('sfnt', self.fullname)
		except Res.Error:
			pass
		else:
			res.RemoveResource()
		res = Res.Resource(self.file.getvalue())
		if self.res_id is None:
			self.res_id = Res.Unique1ID('sfnt')
		res.AddResource('sfnt', self.res_id, self.fullname)
		res.ChangedResource()
		
		self.createFond()
		del self.ttFont
		Res.CloseResFile(self.resref)
		self.file.close()
		self.closed = 1
	
	def createFond(self):
		# Build the 'FOND' (font family) resource that points at the
		# 'sfnt' resource, including width and kerning tables.
		fond_res = Res.Resource("")
		fond_res.AddResource('FOND', self.res_id, self.fullname)
		
		from fontTools import fondLib
		fond = fondLib.FontFamily(fond_res, "w")
		
		fond.ffFirstChar = 0
		fond.ffLastChar = 255
		fond.fondClass = 0
		fond.fontAssoc = [(0, 0, self.res_id)]
		fond.ffFlags = 20480  # XXX ???
		fond.ffIntl = (0, 0)
		fond.ffLeading = 0
		fond.ffProperty = (0, 0, 0, 0, 0, 0, 0, 0, 0)
		fond.ffVersion = 0
		fond.glyphEncoding = {}
		if self.familyname == self.psname:
			fond.styleIndices = (1,) * 48  # uh-oh, fondLib is too dumb.
		else:
			fond.styleIndices = (2,) * 48
		fond.styleStrings = []
		fond.boundingBoxes = None
		fond.ffFamID = self.res_id
		fond.changed = 1
		fond.glyphTableOffset = 0
		fond.styleMappingReserved = 0
		
		# Scale font metrics from font units to the FOND's 4096-unit em.
		scale = 4096.0 / self.ttFont['head'].unitsPerEm
		fond.ffAscent = scale * self.ttFont['hhea'].ascent
		fond.ffDescent = scale * self.ttFont['hhea'].descent
		fond.ffWidMax = scale * self.ttFont['hhea'].advanceWidthMax
		
		fond.ffFamilyName = self.familyname
		fond.psNames = {0: self.psname}
		
		fond.widthTables = {}
		fond.kernTables = {}
		cmap = self.ttFont['cmap'].getcmap(1, 0)
		if cmap:
			# Invert the Mac cmap: glyph name -> character code.
			names = {}
			for code, name in cmap.cmap.items():
				names[name] = code
			if self.ttFont.has_key('kern'):
				kern = self.ttFont['kern'].getkern(0)
				if kern:
					fondkerning = []
					for (left, right), value in kern.kernTable.items():
						# Only pairs that map to Mac character codes
						# can be represented in the FOND.
						if names.has_key(left) and names.has_key(right):
							fondkerning.append((names[left], names[right], scale * value))
					fondkerning.sort()
					fond.kernTables = {0: fondkerning}
			if self.ttFont.has_key('hmtx'):
				hmtx = self.ttFont['hmtx']
				fondwidths = [2048] * 256 + [0, 0]  # default width, + plus two zeros.
				for name, (width, lsb) in hmtx.metrics.items():
					if names.has_key(name):
						fondwidths[names[name]] = scale * width
				fond.widthTables = {0: fondwidths}
		fond.save()
	
	def __del__(self):
		# Make sure buffered data reaches the resource file even if the
		# caller forgot to close().
		if not self.closed:
			self.close()
	
	def __getattr__(self, attr):
		# cheap inheritance: delegate writes etc. to the memory file.
		return getattr(self.file, attr)
230
Lib/fontTools/ttLib/sfnt.py
Normal file
230
Lib/fontTools/ttLib/sfnt.py
Normal file
@ -0,0 +1,230 @@
|
|||||||
|
"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format.
|
||||||
|
|
||||||
|
Defines two public classes:
|
||||||
|
SFNTReader
|
||||||
|
SFNTWriter
|
||||||
|
|
||||||
|
(Normally you don't have to use these classes explicitly; they are
|
||||||
|
used automatically by ttLib.TTFont.)
|
||||||
|
|
||||||
|
The reading and writing of sfnt files is separated in two distinct
|
||||||
|
classes, since whenever the number of tables changes or whenever
|
||||||
|
a table's length changes you need to rewrite the whole file anyway.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import struct, sstruct
|
||||||
|
import Numeric
|
||||||
|
import os
|
||||||
|
|
||||||
|
class SFNTReader:
|
||||||
|
|
||||||
|
def __init__(self, file, checkchecksums=1):
|
||||||
|
self.file = file
|
||||||
|
self.checkchecksums = checkchecksums
|
||||||
|
data = self.file.read(sfntDirectorySize)
|
||||||
|
if len(data) <> sfntDirectorySize:
|
||||||
|
from fontTools import ttLib
|
||||||
|
raise ttLib.TTLibError, "Not a TrueType or OpenType font (not enough data)"
|
||||||
|
sstruct.unpack(sfntDirectoryFormat, data, self)
|
||||||
|
if self.sfntVersion not in ("\000\001\000\000", "OTTO", "true"):
|
||||||
|
from fontTools import ttLib
|
||||||
|
raise ttLib.TTLibError, "Not a TrueType or OpenType font (bad sfntVersion)"
|
||||||
|
self.tables = {}
|
||||||
|
for i in range(self.numTables):
|
||||||
|
entry = SFNTDirectoryEntry()
|
||||||
|
entry.fromfile(self.file)
|
||||||
|
self.tables[entry.tag] = entry
|
||||||
|
|
||||||
|
def has_key(self, tag):
|
||||||
|
return self.tables.has_key(tag)
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
return self.tables.keys()
|
||||||
|
|
||||||
|
def __getitem__(self, tag):
|
||||||
|
"""Fetch the raw table data."""
|
||||||
|
entry = self.tables[tag]
|
||||||
|
self.file.seek(entry.offset)
|
||||||
|
data = self.file.read(entry.length)
|
||||||
|
if self.checkchecksums:
|
||||||
|
if tag == 'head':
|
||||||
|
# Beh: we have to special-case the 'head' table.
|
||||||
|
checksum = calcchecksum(data[:8] + '\0\0\0\0' + data[12:])
|
||||||
|
else:
|
||||||
|
checksum = calcchecksum(data)
|
||||||
|
if self.checkchecksums > 1:
|
||||||
|
# Be obnoxious, and barf when it's wrong
|
||||||
|
assert checksum == entry.checksum, "bad checksum for '%s' table" % tag
|
||||||
|
elif checksum <> entry.checkSum:
|
||||||
|
# Be friendly, and just print a warning.
|
||||||
|
print "bad checksum for '%s' table" % tag
|
||||||
|
return data
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.file.close()
|
||||||
|
|
||||||
|
|
||||||
|
class SFNTWriter:
	
	"""Low-level writer for sfnt (TrueType/OpenType) files.
	
	Table data is written via __setitem__; close() writes the table
	directory and patches the 'head' checkSumAdjustment field.
	"""
	
	def __init__(self, file, numTables, sfntVersion="\000\001\000\000"):
		self.file = file
		self.numTables = numTables
		self.sfntVersion = sfntVersion
		self.searchRange, self.entrySelector, self.rangeShift = getsearchrange(numTables)
		# First table goes right after the (fixed-size) directory.
		self.nextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize
		# clear out directory area
		self.file.seek(self.nextTableOffset)
		# make sure we're actually where we want to be. (XXX old cStringIO bug)
		self.file.write('\0' * (self.nextTableOffset - self.file.tell()))
		self.tables = {}
	
	def __setitem__(self, tag, data):
		"""Write raw table data to disk."""
		if self.tables.has_key(tag):
			# We've written this table to file before. If the length
			# of the data is still the same, we allow overwriting it.
			entry = self.tables[tag]
			if len(data) <> entry.length:
				from fontTools import ttLib
				raise ttLib.TTLibError, "cannot rewrite '%s' table: length does not match directory entry" % tag
		else:
			entry = SFNTDirectoryEntry()
			entry.tag = tag
			entry.offset = self.nextTableOffset
			entry.length = len(data)
			# Tables are long (4-byte) aligned; round the offset up.
			self.nextTableOffset = self.nextTableOffset + ((len(data) + 3) & ~3)
		self.file.seek(entry.offset)
		self.file.write(data)
		self.file.seek(self.nextTableOffset)
		# make sure we're actually where we want to be. (XXX old cStringIO bug)
		self.file.write('\0' * (self.nextTableOffset - self.file.tell()))
		
		# 'head' is checksummed with checkSumAdjustment (bytes 8-11)
		# treated as zero.
		if tag == 'head':
			entry.checkSum = calcchecksum(data[:8] + '\0\0\0\0' + data[12:])
		else:
			entry.checkSum = calcchecksum(data)
		self.tables[tag] = entry
	
	def close(self):
		"""All tables must have been written to disk. Now write the
		directory.
		"""
		tables = self.tables.items()
		tables.sort()
		if len(tables) <> self.numTables:
			from fontTools import ttLib
			raise ttLib.TTLibError, "wrong number of tables; expected %d, found %d" % (self.numTables, len(tables))
		
		directory = sstruct.pack(sfntDirectoryFormat, self)
		
		self.file.seek(sfntDirectorySize)
		# Append the (sorted) directory entries after the header.
		for tag, entry in tables:
			directory = directory + entry.tostring()
		self.calcmasterchecksum(directory)
		self.file.seek(0)
		self.file.write(directory)
		self.file.close()
	
	def calcmasterchecksum(self, directory):
		# calculate checkSumAdjustment: the whole-font checksum over
		# all table checksums plus the directory itself.
		tags = self.tables.keys()
		checksums = Numeric.zeros(len(tags)+1)
		for i in range(len(tags)):
			checksums[i] = self.tables[tags[i]].checkSum
		
		directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
		assert directory_end == len(directory)
		
		checksums[-1] = calcchecksum(directory)
		checksum = Numeric.add.reduce(checksums)
		# BiboAfba! (0xB1B0AFBA is the magic constant from the spec:
		# checkSumAdjustment = 0xB1B0AFBA - sum of entire font)
		checksumadjustment = Numeric.array(0xb1b0afba) - checksum
		# write the checksum to the file, at 'head' table offset + 8
		self.file.seek(self.tables['head'].offset + 8)
		self.file.write(struct.pack("l", checksumadjustment))
|
|
||||||
|
# -- sfnt directory helpers and cruft
|
||||||
|
|
||||||
|
sfntDirectoryFormat = """
|
||||||
|
> # big endian
|
||||||
|
sfntVersion: 4s
|
||||||
|
numTables: H # number of tables
|
||||||
|
searchRange: H # (max2 <= numTables)*16
|
||||||
|
entrySelector: H # log2(max2 <= numTables)
|
||||||
|
rangeShift: H # numTables*16-searchRange
|
||||||
|
"""
|
||||||
|
|
||||||
|
sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat)
|
||||||
|
|
||||||
|
sfntDirectoryEntryFormat = """
|
||||||
|
> # big endian
|
||||||
|
tag: 4s
|
||||||
|
checkSum: l
|
||||||
|
offset: l
|
||||||
|
length: l
|
||||||
|
"""
|
||||||
|
|
||||||
|
sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat)
|
||||||
|
|
||||||
|
class SFNTDirectoryEntry:
	
	"""One entry of the sfnt table directory.
	
	Attributes (tag, checkSum, offset, length) are filled in by
	sstruct according to sfntDirectoryEntryFormat.
	"""
	
	def fromfile(self, file):
		# Read exactly one packed directory entry from the file.
		data = file.read(sfntDirectoryEntrySize)
		sstruct.unpack(sfntDirectoryEntryFormat, data, self)
	
	def fromstring(self, str):
		sstruct.unpack(sfntDirectoryEntryFormat, str, self)
	
	def tostring(self):
		return sstruct.pack(sfntDirectoryEntryFormat, self)
	
	def __repr__(self):
		# A freshly-created entry has no attributes yet.
		if not hasattr(self, "tag"):
			return "<SFNTDirectoryEntry at %x>" % id(self)
		return "<SFNTDirectoryEntry '%s' at %x>" % (self.tag, id(self))
|
|
||||||
|
def calcchecksum(data, start=0):
	"""Calculate the checksum for an arbitrary block of data.
	Optionally takes a 'start' argument, which allows you to
	calculate a checksum in chunks by feeding it a previous
	result.
	
	If the data length is not a multiple of four, it assumes
	it is to be padded with null byte.
	"""
	from fontTools import ttLib
	# Pad to a multiple of four bytes, as the sfnt spec requires.
	remainder = len(data) % 4
	if remainder:
		data = data + '\0' * (4-remainder)
	# Prepend 'start' as one more big-endian long, then sum all longs
	# at C speed with Numeric; overflow wraps, which is what we want.
	a = Numeric.fromstring(struct.pack(">l", start) + data, Numeric.Int32)
	# fromstring uses native byte order; swap on little-endian hosts
	# so the longs are interpreted big-endian.
	if ttLib.endian <> "big":
		a = a.byteswapped()
	return Numeric.add.reduce(a)
|
|
||||||
|
def maxpoweroftwo(x):
	"""Return the highest exponent of two, so that
	(2 ** exponent) <= x
	
	(For x == 0 the loop never runs and -1 is returned.)
	"""
	# Count how many times x can be halved before reaching zero; that
	# count minus one is the position of the highest set bit.
	shifts = 0
	while x:
		x = x >> 1
		shifts = shifts + 1
	return shifts - 1
|
||||||
|
|
||||||
|
def getsearchrange(n):
	"""Calculate searchRange, entrySelector, rangeShift for the
	sfnt directory. 'n' is the number of tables.
	
	These three values are required fields of the sfnt header; they
	support binary search over the table directory.
	"""
	# This stuff needs to be stored in the file, because?
	# (Removed an unused 'import math' -- the computation is purely
	# integer arithmetic via maxpoweroftwo.)
	exponent = maxpoweroftwo(n)
	searchRange = (2 ** exponent) * 16
	entrySelector = exponent
	rangeShift = n * 16 - searchRange
	return searchRange, entrySelector, rangeShift
||||||
|
|
263
Lib/fontTools/ttLib/standardGlyphOrder.py
Normal file
263
Lib/fontTools/ttLib/standardGlyphOrder.py
Normal file
@ -0,0 +1,263 @@
|
|||||||
|
# XXX is this *really* correct?
|
||||||
|
|
||||||
|
standardGlyphOrder = [
|
||||||
|
".notdef", # 0
|
||||||
|
".null", # 1
|
||||||
|
"nonmarkingreturn", # 2
|
||||||
|
"space", # 3
|
||||||
|
"exclam", # 4
|
||||||
|
"quotedbl", # 5
|
||||||
|
"numbersign", # 6
|
||||||
|
"dollar", # 7
|
||||||
|
"percent", # 8
|
||||||
|
"ampersand", # 9
|
||||||
|
"quotesingle", # 10
|
||||||
|
"parenleft", # 11
|
||||||
|
"parenright", # 12
|
||||||
|
"asterisk", # 13
|
||||||
|
"plus", # 14
|
||||||
|
"comma", # 15
|
||||||
|
"hyphen", # 16
|
||||||
|
"period", # 17
|
||||||
|
"slash", # 18
|
||||||
|
"zero", # 19
|
||||||
|
"one", # 20
|
||||||
|
"two", # 21
|
||||||
|
"three", # 22
|
||||||
|
"four", # 23
|
||||||
|
"five", # 24
|
||||||
|
"six", # 25
|
||||||
|
"seven", # 26
|
||||||
|
"eight", # 27
|
||||||
|
"nine", # 28
|
||||||
|
"colon", # 29
|
||||||
|
"semicolon", # 30
|
||||||
|
"less", # 31
|
||||||
|
"equal", # 32
|
||||||
|
"greater", # 33
|
||||||
|
"question", # 34
|
||||||
|
"at", # 35
|
||||||
|
"A", # 36
|
||||||
|
"B", # 37
|
||||||
|
"C", # 38
|
||||||
|
"D", # 39
|
||||||
|
"E", # 40
|
||||||
|
"F", # 41
|
||||||
|
"G", # 42
|
||||||
|
"H", # 43
|
||||||
|
"I", # 44
|
||||||
|
"J", # 45
|
||||||
|
"K", # 46
|
||||||
|
"L", # 47
|
||||||
|
"M", # 48
|
||||||
|
"N", # 49
|
||||||
|
"O", # 50
|
||||||
|
"P", # 51
|
||||||
|
"Q", # 52
|
||||||
|
"R", # 53
|
||||||
|
"S", # 54
|
||||||
|
"T", # 55
|
||||||
|
"U", # 56
|
||||||
|
"V", # 57
|
||||||
|
"W", # 58
|
||||||
|
"X", # 59
|
||||||
|
"Y", # 60
|
||||||
|
"Z", # 61
|
||||||
|
"bracketleft", # 62
|
||||||
|
"backslash", # 63
|
||||||
|
"bracketright", # 64
|
||||||
|
"asciicircum", # 65
|
||||||
|
"underscore", # 66
|
||||||
|
"grave", # 67
|
||||||
|
"a", # 68
|
||||||
|
"b", # 69
|
||||||
|
"c", # 70
|
||||||
|
"d", # 71
|
||||||
|
"e", # 72
|
||||||
|
"f", # 73
|
||||||
|
"g", # 74
|
||||||
|
"h", # 75
|
||||||
|
"i", # 76
|
||||||
|
"j", # 77
|
||||||
|
"k", # 78
|
||||||
|
"l", # 79
|
||||||
|
"m", # 80
|
||||||
|
"n", # 81
|
||||||
|
"o", # 82
|
||||||
|
"p", # 83
|
||||||
|
"q", # 84
|
||||||
|
"r", # 85
|
||||||
|
"s", # 86
|
||||||
|
"t", # 87
|
||||||
|
"u", # 88
|
||||||
|
"v", # 89
|
||||||
|
"w", # 90
|
||||||
|
"x", # 91
|
||||||
|
"y", # 92
|
||||||
|
"z", # 93
|
||||||
|
"braceleft", # 94
|
||||||
|
"bar", # 95
|
||||||
|
"braceright", # 96
|
||||||
|
"asciitilde", # 97
|
||||||
|
"Adieresis", # 98
|
||||||
|
"Aring", # 99
|
||||||
|
"Ccedilla", # 100
|
||||||
|
"Eacute", # 101
|
||||||
|
"Ntilde", # 102
|
||||||
|
"Odieresis", # 103
|
||||||
|
"Udieresis", # 104
|
||||||
|
"aacute", # 105
|
||||||
|
"agrave", # 106
|
||||||
|
"acircumflex", # 107
|
||||||
|
"adieresis", # 108
|
||||||
|
"atilde", # 109
|
||||||
|
"aring", # 110
|
||||||
|
"ccedilla", # 111
|
||||||
|
"eacute", # 112
|
||||||
|
"egrave", # 113
|
||||||
|
"ecircumflex", # 114
|
||||||
|
"edieresis", # 115
|
||||||
|
"iacute", # 116
|
||||||
|
"igrave", # 117
|
||||||
|
"icircumflex", # 118
|
||||||
|
"idieresis", # 119
|
||||||
|
"ntilde", # 120
|
||||||
|
"oacute", # 121
|
||||||
|
"ograve", # 122
|
||||||
|
"ocircumflex", # 123
|
||||||
|
"odieresis", # 124
|
||||||
|
"otilde", # 125
|
||||||
|
"uacute", # 126
|
||||||
|
"ugrave", # 127
|
||||||
|
"ucircumflex", # 128
|
||||||
|
"udieresis", # 129
|
||||||
|
"dagger", # 130
|
||||||
|
"degree", # 131
|
||||||
|
"cent", # 132
|
||||||
|
"sterling", # 133
|
||||||
|
"section", # 134
|
||||||
|
"bullet", # 135
|
||||||
|
"paragraph", # 136
|
||||||
|
"germandbls", # 137
|
||||||
|
"registered", # 138
|
||||||
|
"copyright", # 139
|
||||||
|
"trademark", # 140
|
||||||
|
"acute", # 141
|
||||||
|
"dieresis", # 142
|
||||||
|
"notequal", # 143
|
||||||
|
"AE", # 144
|
||||||
|
"Oslash", # 145
|
||||||
|
"infinity", # 146
|
||||||
|
"plusminus", # 147
|
||||||
|
"lessequal", # 148
|
||||||
|
"greaterequal", # 149
|
||||||
|
"yen", # 150
|
||||||
|
"mu", # 151
|
||||||
|
"partialdiff", # 152
|
||||||
|
"summation", # 153
|
||||||
|
"product", # 154
|
||||||
|
"pi", # 155
|
||||||
|
"integral", # 156
|
||||||
|
"ordfeminine", # 157
|
||||||
|
"ordmasculine", # 158
|
||||||
|
"Omega", # 159
|
||||||
|
"ae", # 160
|
||||||
|
"oslash", # 161
|
||||||
|
"questiondown", # 162
|
||||||
|
"exclamdown", # 163
|
||||||
|
"logicalnot", # 164
|
||||||
|
"radical", # 165
|
||||||
|
"florin", # 166
|
||||||
|
"approxequal", # 167
|
||||||
|
"Delta", # 168
|
||||||
|
"guillemotleft", # 169
|
||||||
|
"guillemotright", # 170
|
||||||
|
"ellipsis", # 171
|
||||||
|
"nonbreakingspace", # 172
|
||||||
|
"Agrave", # 173
|
||||||
|
"Atilde", # 174
|
||||||
|
"Otilde", # 175
|
||||||
|
"OE", # 176
|
||||||
|
"oe", # 177
|
||||||
|
"endash", # 178
|
||||||
|
"emdash", # 179
|
||||||
|
"quotedblleft", # 180
|
||||||
|
"quotedblright", # 181
|
||||||
|
"quoteleft", # 182
|
||||||
|
"quoteright", # 183
|
||||||
|
"divide", # 184
|
||||||
|
"lozenge", # 185
|
||||||
|
"ydieresis", # 186
|
||||||
|
"Ydieresis", # 187
|
||||||
|
"fraction", # 188
|
||||||
|
"currency", # 189
|
||||||
|
"guilsinglleft", # 190
|
||||||
|
"guilsinglright", # 191
|
||||||
|
"fi", # 192
|
||||||
|
"fl", # 193
|
||||||
|
"daggerdbl", # 194
|
||||||
|
"periodcentered", # 195
|
||||||
|
"quotesinglbase", # 196
|
||||||
|
"quotedblbase", # 197
|
||||||
|
"perthousand", # 198
|
||||||
|
"Acircumflex", # 199
|
||||||
|
"Ecircumflex", # 200
|
||||||
|
"Aacute", # 201
|
||||||
|
"Edieresis", # 202
|
||||||
|
"Egrave", # 203
|
||||||
|
"Iacute", # 204
|
||||||
|
"Icircumflex", # 205
|
||||||
|
"Idieresis", # 206
|
||||||
|
"Igrave", # 207
|
||||||
|
"Oacute", # 208
|
||||||
|
"Ocircumflex", # 209
|
||||||
|
"apple", # 210
|
||||||
|
"Ograve", # 211
|
||||||
|
"Uacute", # 212
|
||||||
|
"Ucircumflex", # 213
|
||||||
|
"Ugrave", # 214
|
||||||
|
"dotlessi", # 215
|
||||||
|
"circumflex", # 216
|
||||||
|
"tilde", # 217
|
||||||
|
"macron", # 218
|
||||||
|
"breve", # 219
|
||||||
|
"dotaccent", # 220
|
||||||
|
"ring", # 221
|
||||||
|
"cedilla", # 222
|
||||||
|
"hungarumlaut", # 223
|
||||||
|
"ogonek", # 224
|
||||||
|
"caron", # 225
|
||||||
|
"Lslash", # 226
|
||||||
|
"lslash", # 227
|
||||||
|
"Scaron", # 228
|
||||||
|
"scaron", # 229
|
||||||
|
"Zcaron", # 230
|
||||||
|
"zcaron", # 231
|
||||||
|
"brokenbar", # 232
|
||||||
|
"Eth", # 233
|
||||||
|
"eth", # 234
|
||||||
|
"Yacute", # 235
|
||||||
|
"yacute", # 236
|
||||||
|
"Thorn", # 237
|
||||||
|
"thorn", # 238
|
||||||
|
"minus", # 239
|
||||||
|
"multiply", # 240
|
||||||
|
"onesuperior", # 241
|
||||||
|
"twosuperior", # 242
|
||||||
|
"threesuperior", # 243
|
||||||
|
"onehalf", # 244
|
||||||
|
"onequarter", # 245
|
||||||
|
"threequarters", # 246
|
||||||
|
"franc", # 247
|
||||||
|
"Gbreve", # 248
|
||||||
|
"gbreve", # 249
|
||||||
|
"Idotaccent", # 250
|
||||||
|
"Scedilla", # 251
|
||||||
|
"scedilla", # 252
|
||||||
|
"Cacute", # 253
|
||||||
|
"cacute", # 254
|
||||||
|
"Ccaron", # 255
|
||||||
|
"ccaron", # 256
|
||||||
|
"dcroat" # 257
|
||||||
|
]
|
||||||
|
|
35
Lib/fontTools/ttLib/tables/C_F_F_.py
Normal file
35
Lib/fontTools/ttLib/tables/C_F_F_.py
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
import DefaultTable
|
||||||
|
from fontTools import cffLib
|
||||||
|
|
||||||
|
|
||||||
|
class table_C_F_F_(DefaultTable.DefaultTable, cffLib.CFFFontSet):
	
	"""'CFF ' table: a PostScript-flavored outline table, parsed by
	cffLib. Compilation and XML import are not implemented yet."""
	
	def __init__(self, tag):
		DefaultTable.DefaultTable.__init__(self, tag)
		cffLib.CFFFontSet.__init__(self)
		# Guards against handing out the glyph order more than once.
		self._gaveGlyphOrder = 0
	
	def decompile(self, data, otFont):
		self.data = data # XXX while work is in progress...
		cffLib.CFFFontSet.decompile(self, data)
		assert len(self.fonts) == 1, "can't deal with multi-font CFF tables."
	
	#def compile(self, otFont):
	#	xxx
	
	def getGlyphOrder(self):
		# The glyph order may only be requested once per instance.
		if self._gaveGlyphOrder:
			from fontTools import ttLib
			raise ttLib.TTLibError, "illegal use of getGlyphOrder()"
		self._gaveGlyphOrder = 1
		return self.fonts[self.fontNames[0]].getGlyphOrder()
	
	def setGlyphOrder(self, glyphOrder):
		# Forward to the single contained font.
		self.fonts[self.fontNames[0]].setGlyphOrder(glyphOrder)
	
	def toXML(self, writer, otFont, progress=None):
		cffLib.CFFFontSet.toXML(self, writer, progress)
	
	#def fromXML(self, (name, attrs, content), otFont):
	#	xxx
|
12
Lib/fontTools/ttLib/tables/D_S_I_G_.py
Normal file
12
Lib/fontTools/ttLib/tables/D_S_I_G_.py
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
import DefaultTable
|
||||||
|
|
||||||
|
class table_D_S_I_G_(DefaultTable.DefaultTable):
	
	"""'DSIG' (digital signature) table; dumped as an opaque hex blob."""
	
	def toXML(self, xmlWriter, ttFont):
		# The signature covers the original binary font data, so any
		# recompilation invalidates it; warn the reader of the XML.
		xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
		xmlWriter.newline()
		xmlWriter.begintag("hexdata")
		xmlWriter.newline()
		rawData = self.compile(ttFont)
		xmlWriter.dumphex(rawData)
		xmlWriter.endtag("hexdata")
		xmlWriter.newline()
36
Lib/fontTools/ttLib/tables/DefaultTable.py
Normal file
36
Lib/fontTools/ttLib/tables/DefaultTable.py
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
import string
|
||||||
|
import sys
|
||||||
|
|
||||||
|
class DefaultTable:
	
	"""Fallback table class: holds a table's raw binary data and dumps
	it as hex in XML. Used for any table without a dedicated parser."""
	
	# Tables that must be compiled before this one; none by default.
	dependencies = []
	
	def __init__(self, tag):
		self.tableTag = tag
	
	def decompile(self, data, ttFont):
		# Just keep the raw bytes around.
		self.data = data
	
	def compile(self, ttFont):
		return self.data
	
	def toXML(self, writer, ttFont):
		writer.begintag("hexdata")
		writer.newline()
		writer.dumphex(self.compile(ttFont))
		writer.endtag("hexdata")
		writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		from fontTools.misc.textTools import readHex
		from fontTools import ttLib
		# Only the <hexdata> element produced by toXML is accepted.
		if name <> "hexdata":
			raise ttLib.TTLibError, "can't handle '%s' element" % name
		self.decompile(readHex(content), ttFont)
	
	def __repr__(self):
		return "<'%s' table at %x>" % (self.tableTag, id(self))
	
	def __cmp__(self, other):
		# Tables compare equal when all their attributes do.
		return cmp(self.__dict__, other.__dict__)
|
|
294
Lib/fontTools/ttLib/tables/G_P_O_S_.py
Normal file
294
Lib/fontTools/ttLib/tables/G_P_O_S_.py
Normal file
@ -0,0 +1,294 @@
|
|||||||
|
import otCommon
|
||||||
|
|
||||||
|
|
||||||
|
class table_G_P_O_S_(otCommon.base_GPOS_GSUB):
	
	"""'GPOS' table; shared GPOS/GSUB machinery lives in otCommon."""
	
	def getLookupTypeClass(self, lookupType):
		# Dispatch a numeric lookup type to its handler class.
		return lookupTypeClasses[lookupType]
|
|
||||||
|
|
||||||
|
class SinglePos:
	
	# XXX placeholder: GPOS lookup type 1 (single adjustment) is not
	# implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
	
	def fromXML(self, (name, attrs, content), otFont):
		xxx
|
|
||||||
|
|
||||||
|
class PairPos:
	
	"""GPOS lookup type 2: pair adjustment (kerning and the like).
	
	Format 1 stores value records per glyph pair; format 2 stores
	them per (class1, class2) pair. Only decompilation and partial
	XML output are implemented.
	"""
	
	def decompile(self, reader, otFont):
		self.format = reader.readUShort()
		if self.format == 1:
			self.decompileFormat1(reader, otFont)
		elif self.format == 2:
			self.decompileFormat2(reader, otFont)
		else:
			from fontTools import ttLib
			raise ttLib.TTLibError, "unknown PairPos format: %d" % self.format
	
	def decompileFormat1(self, reader, otFont):
		# Coverage table gives the first glyph of each pair; the i-th
		# PairSet offset corresponds to the i-th covered glyph.
		coverage = reader.readTable(otCommon.CoverageTable, otFont)
		glyphNames = coverage.glyphNames
		# The two ValueFormat fields select which value fields follow.
		valueFactory1 = ValueRecordFactory(reader.readUShort())
		valueFactory2 = ValueRecordFactory(reader.readUShort())
		self.pairs = pairs = {}
		for i in range(reader.readUShort()):
			firstGlyphName = glyphNames[i]
			offset = reader.readOffset()
			setData = reader.getSubString(offset)
			set = PairSet()
			set.decompile(setData, otFont, valueFactory1, valueFactory2)
			# pairs: first glyph name -> [(second glyph, value1, value2), ...]
			pairs[firstGlyphName] = set.values
	
	def decompileFormat2(self, reader, otFont):
		coverage = reader.readTable(otCommon.CoverageTable, otFont)
		glyphNames = coverage.glyphNames
		valueFactory1 = ValueRecordFactory(reader.readUShort())
		valueFactory2 = ValueRecordFactory(reader.readUShort())
		# Class definitions map glyphs onto the matrix row/column classes.
		self.classDef1 = reader.readTable(otCommon.ClassDefinitionTable, otFont)
		self.classDef2 = reader.readTable(otCommon.ClassDefinitionTable, otFont)
		class1Count = reader.readUShort()
		class2Count = reader.readUShort()
		self.pairs = pairs = {} # sparse matrix
		# The binary data is a dense class1 x class2 matrix; only
		# non-empty cells/rows are kept.
		for i in range(class1Count):
			row = {}
			for j in range(class2Count):
				value1 = valueFactory1.getValueRecord(reader)
				value2 = valueFactory2.getValueRecord(reader)
				if value1 or value2:
					row[j] = (value1, value2)
			if row:
				pairs[i] = row
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		if self.format == 1:
			self.toXMLFormat1(xmlWriter, otFont)
		elif self.format == 2:
			self.toXMLFormat2(xmlWriter, otFont)
		else:
			from fontTools import ttLib
			raise ttLib.TTLibError, "unknown PairPos format: %d" % self.format
	
	def toXMLFormat1(self, xmlWriter, otFont):
		# Emit one <Pair> element per glyph pair, sorted by first glyph.
		pairs = self.pairs.items()
		pairs.sort()
		for firstGlyph, secondGlyphs in pairs:
			for secondGlyph, value1, value2 in secondGlyphs:
				#XXXXXXXXX
				xmlWriter.begintag("Pair", first=firstGlyph, second=secondGlyph)
				xmlWriter.newline()
				if value1:
					value1.toXML(xmlWriter, otFont)
				if value2:
					value2.toXML(xmlWriter, otFont)
				xmlWriter.endtag("Pair")
				xmlWriter.newline()
	
	def toXMLFormat2(self, xmlWriter, otFont):
		# XXX format 2 XML output not implemented yet.
		xmlWriter.comment("XXX")
		xmlWriter.newline()
	
	def fromXML(self, (name, attrs, content), otFont):
		xxx
|
|
||||||
|
|
||||||
|
class PairSet:
	
	"""All pairs sharing one first glyph (PairPos format 1 helper)."""
	
	def decompile(self, reader, otFont, valueFactory1, valueFactory2):
		pairValueCount = reader.readUShort()
		# values: list of (second glyph name, value1, value2) tuples.
		self.values = values = []
		for j in range(pairValueCount):
			secondGlyphID = reader.readUShort()
			secondGlyphName = otFont.getGlyphName(secondGlyphID)
			value1 = valueFactory1.getValueRecord(reader)
			value2 = valueFactory2.getValueRecord(reader)
			values.append((secondGlyphName, value1, value2))
	
	def compile(self, otFont):
		xxx
||||||
|
|
||||||
|
#
|
||||||
|
# ------------------
|
||||||
|
#
|
||||||
|
|
||||||
|
class CursivePos:
	
	# XXX placeholder: GPOS lookup type 3 (cursive attachment) is not
	# implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
class MarkBasePos:
	
	# XXX placeholder: GPOS lookup type 4 (mark-to-base attachment) is
	# not implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
class MarkLigPos:
	
	# XXX placeholder: GPOS lookup type 5 (mark-to-ligature attachment)
	# is not implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
class MarkMarkPos:
	
	# XXX placeholder: GPOS lookup type 6 (mark-to-mark attachment) is
	# not implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
class ContextPos:
	
	# XXX placeholder: GPOS lookup type 7 (contextual positioning) is
	# not implemented; 'xxx' below is a deliberate NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
class ChainContextPos:
	
	# XXX placeholder: GPOS lookup type 8 (chaining contextual
	# positioning) is not implemented; 'xxx' is a NameError marker.
	
	def decompile(self, reader, otFont):
		pass
	
	def compile(self, otFont):
		xxx
	
	def toXML(self, xmlWriter, otFont):
		xmlWriter.comment("XXX")
		xmlWriter.newline()
||||||
|
|
||||||
|
|
||||||
|
# Maps GPOS lookup type numbers (1-8) onto their handler classes;
# used by table_G_P_O_S_.getLookupTypeClass().
lookupTypeClasses = {
	1: SinglePos,
	2: PairPos,
	3: CursivePos,
	4: MarkBasePos,
	5: MarkLigPos,
	6: MarkMarkPos,
	7: ContextPos,
	8: ChainContextPos,
}
||||||
|
|
||||||
|
|
||||||
|
# ValueRecord field definitions, in the order the fields appear in the
# binary data. Each ValueFormat bit mask selects one two-byte field.
valueRecordFormat = [
	#	Mask	Name		struct format char
	(0x0001, "XPlacement",	"h"),
	(0x0002, "YPlacement",	"h"),
	(0x0004, "XAdvance",	"h"),
	(0x0008, "YAdvance",	"h"),
	(0x0010, "XPlaDevice",	"H"),
	(0x0020, "YPlaDevice",	"H"),
	(0x0040, "XAdvDevice",	"H"),
	(0x0080, "YAdvDevice",	"H"),
	#	reserved:
	(0x0100, "Reserved1",	"H"),
	(0x0200, "Reserved2",	"H"),
	(0x0400, "Reserved3",	"H"),
	(0x0800, "Reserved4",	"H"),
	(0x1000, "Reserved5",	"H"),
	(0x2000, "Reserved6",	"H"),
	(0x4000, "Reserved7",	"H"),
	(0x8000, "Reserved8",	"H"),
]
|
||||||
|
|
||||||
|
|
||||||
|
class ValueRecordFactory:
	
	"""Reads ValueRecords whose layout is fixed by a ValueFormat mask."""
	
	def __init__(self, valueFormat):
		# Collect the field names and struct format characters selected
		# by the ValueFormat bits, in table order.
		structFormat = ">"
		fieldNames = []
		for mask, name, formatChar in valueRecordFormat:
			if valueFormat & mask:
				fieldNames.append(name)
				structFormat = structFormat + formatChar
		self.names = fieldNames
		self.format = structFormat
		self.size = 2 * len(fieldNames)  # every field is two bytes wide
	
	def getValueRecord(self, reader):
		# An empty ValueFormat means there is nothing to read.
		if not self.names:
			return None
		values = map(int, reader.readStruct(self.format, self.size))
		valueRecord = ValueRecord()
		for name, value in map(None, self.names, values):
			setattr(valueRecord, name, value)
		return valueRecord
|
||||||
|
|
||||||
|
|
||||||
|
class ValueRecord:
    # Field attributes (XPlacement, YAdvance, ...) are attached
    # dynamically by ValueRecordFactory; see that class.

    def __nonzero__(self):
        # Truthy as soon as any stored field is non-zero.
        return 1 if any(self.__dict__.values()) else 0

    def toXML(self, xmlWriter, otFont):
        # Partition the present fields: the first four entries of
        # valueRecordFormat are plain values, the next four are device
        # records.
        simpleItems = [(fieldName, getattr(self, fieldName))
                for mask, fieldName, fmt in valueRecordFormat[:4]
                if hasattr(self, fieldName)]
        deviceItems = [(fieldName, getattr(self, fieldName))
                for mask, fieldName, fmt in valueRecordFormat[4:8]
                if hasattr(self, fieldName)]
        if not deviceItems:
            xmlWriter.simpletag("ValueRecord", simpleItems)
            xmlWriter.newline()
        else:
            xmlWriter.begintag("ValueRecord", simpleItems)
            xmlWriter.newline()
            for fieldName, deviceRecord in deviceItems:
                # deliberate NameError placeholder: device-record XML
                # output is not implemented yet
                xxx
            xmlWriter.endtag("ValueRecord")
            xmlWriter.newline()

    def __repr__(self):
        return "<ValueRecord>"
|
||||||
|
|
283
Lib/fontTools/ttLib/tables/G_S_U_B_.py
Normal file
283
Lib/fontTools/ttLib/tables/G_S_U_B_.py
Normal file
@ -0,0 +1,283 @@
|
|||||||
|
import otCommon
|
||||||
|
|
||||||
|
|
||||||
|
class table_G_S_U_B_(otCommon.base_GPOS_GSUB):
    # The Glyph Substitution table.  The machinery shared with GPOS
    # lives in otCommon.base_GPOS_GSUB; this subclass only supplies the
    # GSUB-specific lookup-type dispatch.

    def getLookupTypeClass(self, lookupType):
        # lookupTypeClasses is the module-level map defined at the
        # bottom of this file; unsupported types raise KeyError.
        return lookupTypeClasses[lookupType]
|
||||||
|
|
||||||
|
|
||||||
|
class SingleSubst:
    # GSUB LookupType 1: replace a single glyph with another glyph.
    # decompile fills self.substitutions: {inputGlyphName: outputGlyphName}.

    def decompile(self, reader, otFont):
        self.format = reader.readUShort()
        if self.format == 1:
            self.decompileFormat1(reader, otFont)
        elif self.format == 2:
            self.decompileFormat2(reader, otFont)
        else:
            from fontTools import ttLib
            # raise E(msg) spelling works on both Python 2 and 3
            raise ttLib.TTLibError("unknown SingleSub format: %d" % self.format)

    def decompileFormat1(self, reader, otFont):
        # Format 1: output glyph ID = input glyph ID + constant delta.
        coverage = reader.readTable(otCommon.CoverageTable, otFont)
        glyphIDs = coverage.getGlyphIDs()
        glyphNames = coverage.getGlyphNames()
        self.substitutions = substitutions = {}
        deltaGlyphID = reader.readShort()
        for i in range(len(glyphIDs)):
            # 'inGlyph'/'outGlyph' avoid shadowing the builtin input()
            inGlyph = glyphNames[i]
            outGlyph = otFont.getGlyphName(glyphIDs[i] + deltaGlyphID)
            substitutions[inGlyph] = outGlyph

    def decompileFormat2(self, reader, otFont):
        # Format 2: explicit output glyph ID list, parallel to coverage.
        coverage = reader.readTable(otCommon.CoverageTable, otFont)
        glyphNames = coverage.getGlyphNames()
        glyphCount = reader.readUShort()
        self.substitutions = substitutions = {}
        for i in range(glyphCount):
            glyphID = reader.readUShort()
            substitutions[glyphNames[i]] = otFont.getGlyphName(glyphID)

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # sort so the XML output is stable and diffable
        for inGlyph, outGlyph in sorted(self.substitutions.items()):
            xmlWriter.simpletag("Subst", [("in", inGlyph), ("out", outGlyph)])
            xmlWriter.newline()

    def fromXML(self, element, otFont):
        # Tuple parameter unpacking moved into the body (Python 3
        # forbids it in signatures); call sites are unchanged.
        name, attrs, content = element
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
||||||
|
|
||||||
|
class MultipleSubst:
    # GSUB LookupType 2: replace one glyph with a sequence of glyphs.
    # decompile fills self.substitutions: {inputGlyphName: [outputNames]}.

    def decompile(self, reader, otFont):
        format = reader.readUShort()
        # '!=' replaces the removed '<>' operator; raise E(msg) works on
        # both Python 2 and 3
        if format != 1:
            from fontTools import ttLib
            raise ttLib.TTLibError("unknown MultipleSubst format: %d" % format)
        glyphNames = reader.readTable(otCommon.CoverageTable, otFont).getGlyphNames()
        sequenceCount = reader.readUShort()
        self.substitutions = substitutions = {}
        for i in range(sequenceCount):
            sequence = reader.readTable(Sequence, otFont)
            substitutions[glyphNames[i]] = sequence.glyphs

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # str.join replaces the old string.join(); the output bytes are
        # identical, and sorting keeps the XML stable
        for inGlyph, outGlyphs in sorted(self.substitutions.items()):
            xmlWriter.simpletag("Subst", [("in", inGlyph), ("out", ",".join(outGlyphs))])
            xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class Sequence:
    # One output glyph run for MultipleSubst: a count followed by that
    # many glyph IDs.

    def decompile(self, reader, otFont):
        glyphCount = reader.readUShort()
        # same read order as before: count first, then one ID per glyph
        self.glyphs = [otFont.getGlyphName(reader.readUShort())
                for _ in range(glyphCount)]

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
||||||
|
|
||||||
|
class AlternateSubst:
    # GSUB LookupType 3: offer alternate glyphs for a single glyph.
    # decompile fills self.alternateSet: {inputGlyphName: [alternates]}.

    def decompile(self, reader, otFont):
        format = reader.readUShort()
        # '!=' replaces '<>'; raise E(msg) works on Python 2 and 3
        if format != 1:
            from fontTools import ttLib
            raise ttLib.TTLibError("unknown AlternateSubst format: %d" % format)
        coverage = reader.readTable(otCommon.CoverageTable, otFont)
        glyphNames = coverage.getGlyphNames()
        alternateSetCount = reader.readUShort()
        self.alternateSet = alternateSet = {}
        for i in range(alternateSetCount):
            # 'altSet' avoids shadowing the builtin set()
            altSet = reader.readTable(AlternateSet, otFont)
            alternateSet[glyphNames[i]] = altSet.glyphs

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # sorted for stable output
        for inGlyph, substList in sorted(self.alternateSet.items()):
            xmlWriter.begintag("AlternateSet", [("in", inGlyph)])
            xmlWriter.newline()
            for outGlyph in substList:
                xmlWriter.simpletag("Subst", out=outGlyph)
                xmlWriter.newline()
            xmlWriter.endtag("AlternateSet")
            xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class AlternateSet:
    # The list of alternate glyphs for one covered input glyph.

    def decompile(self, reader, otFont):
        glyphCount = reader.readUShort()
        glyphIDs = reader.readUShortArray(glyphCount)
        # comprehension replaces map(); same resulting glyph name list
        self.glyphs = [otFont.getGlyphName(gid) for gid in glyphIDs]

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
||||||
|
|
||||||
|
class LigatureSubst:
    # GSUB LookupType 4: replace a sequence of glyphs with a ligature.
    # decompile fills self.ligatures as a list of
    # ((firstGlyph, comp1, comp2, ...), ligatureGlyph) pairs.

    def decompile(self, reader, otFont):
        format = reader.readUShort()
        # '!=' replaces '<>'; raise E(msg) works on Python 2 and 3
        if format != 1:
            from fontTools import ttLib
            raise ttLib.TTLibError("unknown LigatureSubst format: %d" % format)
        coverage = reader.readTable(otCommon.CoverageTable, otFont)
        glyphNames = coverage.getGlyphNames()
        ligSetCount = reader.readUShort()
        self.ligatures = ligatures = []
        for i in range(ligSetCount):
            firstGlyph = glyphNames[i]
            ligSet = reader.readTable(LigatureSet, otFont)
            for ligatureGlyph, components in ligSet.ligatures:
                # BUGFIX: the original passed TWO arguments to
                # list.append (misplaced parenthesis), which raises
                # TypeError.  Append a single (sequence, ligature) pair,
                # which is what toXML below iterates over.
                ligatures.append(((firstGlyph,) + tuple(components), ligatureGlyph))

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # str.join replaces the old string.join(); output is identical
        for inGlyphs, outGlyph in self.ligatures:
            xmlWriter.simpletag("Subst", [("in", ",".join(inGlyphs)), ("out", outGlyph)])
            xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class LigatureSet:
    # All ligatures that start with the same (coverage-supplied) glyph.

    def decompile(self, reader, otFont):
        count = reader.readUShort()
        entries = []
        for _ in range(count):
            lig = reader.readTable(Ligature, otFont)
            entries.append((lig.ligatureGlyph, lig.components))
        self.ligatures = entries

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
||||||
|
|
||||||
|
class Ligature:
    # One ligature: the resulting glyph plus its trailing component
    # glyphs (the first component is implied by the coverage table).

    def decompile(self, reader, otFont):
        self.ligatureGlyph = otFont.getGlyphName(reader.readUShort())
        compCount = reader.readUShort()
        # compCount counts the first glyph too, which is stored
        # elsewhere, hence compCount - 1 reads here
        self.components = [otFont.getGlyphName(reader.readUShort())
                for _ in range(compCount - 1)]

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
||||||
|
|
||||||
|
class ContextSubst:
    # GSUB LookupType 5: contextual substitution.  Only format 3 is
    # actually decompiled; formats 1 and 2 are placeholders.

    def decompile(self, reader, otFont):
        format = reader.readUShort()
        if format == 1:
            self.decompileFormat1(reader, otFont)
        elif format == 2:
            self.decompileFormat2(reader, otFont)
        elif format == 3:
            self.decompileFormat3(reader, otFont)
        else:
            from fontTools import ttLib
            # raise E(msg) spelling works on both Python 2 and 3
            raise ttLib.TTLibError("unknown ContextSubst format: %d" % format)

    def decompileFormat1(self, reader, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def decompileFormat2(self, reader, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def decompileFormat3(self, reader, otFont):
        # Format 3: one coverage table per input position, plus a list
        # of lookup records applied at matched positions.
        glyphCount = reader.readUShort()
        substCount = reader.readUShort()
        coverage = []
        for i in range(glyphCount):
            coverage.append(reader.readTable(otCommon.CoverageTable, otFont))
        self.substitutions = substitutions = []
        for i in range(substCount):
            lookupRecord = SubstLookupRecord()
            lookupRecord.decompile(reader, otFont)
            substitutions.append((coverage[i].getGlyphNames(), lookupRecord))

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # XML dump not yet implemented; emit a marker comment only
        xmlWriter.comment("XXX")
        xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class ChainContextSubst:
    # GSUB LookupType 6: chained contextual substitution.  All three
    # format decompilers are stubs for now; only the format dispatch is
    # implemented.

    def decompile(self, reader, otFont):
        self.format = reader.readUShort()
        if self.format == 1:
            self.decompileFormat1(reader, otFont)
        elif self.format == 2:
            self.decompileFormat2(reader, otFont)
        elif self.format == 3:
            self.decompileFormat3(reader, otFont)
        else:
            from fontTools import ttLib
            # raise E(msg) spelling works on both Python 2 and 3
            raise ttLib.TTLibError("unknown ChainContextSubst format: %d" % self.format)

    def decompileFormat1(self, reader, otFont):
        pass

    def decompileFormat2(self, reader, otFont):
        pass

    def decompileFormat3(self, reader, otFont):
        pass

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx

    def toXML(self, xmlWriter, otFont):
        # XML dump not yet implemented; emit a marker comment only
        xmlWriter.comment("XXX")
        xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
# Map GSUB LookupType numbers (see the OpenType spec, GSUB chapter) to
# the classes defined above that handle subtables of that type.
lookupTypeClasses = {
    1: SingleSubst,
    2: MultipleSubst,
    3: AlternateSubst,
    4: LigatureSubst,
    5: ContextSubst,
    6: ChainContextSubst,
}
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Shared classes
|
||||||
|
#
|
||||||
|
|
||||||
|
class SubstLookupRecord:
    # Points one matched input position (sequenceIndex) at the lookup
    # (lookupListIndex) to apply there; shared by the contextual
    # substitution formats.

    def decompile(self, reader, otFont):
        sequenceIndex = reader.readUShort()
        lookupListIndex = reader.readUShort()
        self.sequenceIndex, self.lookupListIndex = sequenceIndex, lookupListIndex

    def compile(self, otFont):
        # deliberate NameError placeholder: not implemented yet
        xxx
|
||||||
|
|
50
Lib/fontTools/ttLib/tables/L_T_S_H_.py
Normal file
50
Lib/fontTools/ttLib/tables/L_T_S_H_.py
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
import struct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
# XXX I've lowered the strictness, to make sure Apple's own Chicago
|
||||||
|
# XXX gets through. They're looking into it, I hope to raise the standards
|
||||||
|
# XXX back to normal eventually.
|
||||||
|
|
||||||
|
class table_L_T_S_H_(DefaultTable.DefaultTable):
    # The Linear Threshold table: one byte per glyph giving the vertical
    # ppem at and above which the glyph scales linearly.  Stored here as
    # self.yPels: {glyphName: yPel}.

    def decompile(self, data, ttFont):
        version, numGlyphs = struct.unpack(">HH", data[:4])
        data = data[4:]
        assert version == 0
        assert len(data) == numGlyphs
        # ouch: the assertion is not true in Chicago!
        #assert numGlyphs == ttFont['maxp'].numGlyphs
        yPels = array.array("B")
        yPels.fromstring(data)
        self.yPels = {}
        for i in range(numGlyphs):
            self.yPels[ttFont.getGlyphName(i)] = yPels[i]

    def compile(self, ttFont):
        version = 0
        numGlyphs = len(self.yPels)
        # place each glyph's value at its glyph ID position
        yPels = [0] * numGlyphs
        # ouch: the assertion is not true in Chicago!
        #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
        for name in self.yPels.keys():
            yPels[ttFont.getGlyphID(name)] = self.yPels[name]
        yPels = array.array("B", yPels)
        return struct.pack(">HH", version, numGlyphs) + yPels.tostring()

    def toXML(self, writer, ttFont):
        # sorted() replaces keys()+sort() and also works on Python 3,
        # where dict views have no sort method
        for name in sorted(self.yPels.keys()):
            writer.simpletag("yPel", name=name, value=self.yPels[name])
            writer.newline()

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); the call signature is unchanged.
        name, attrs, content = element
        if not hasattr(self, "yPels"):
            self.yPels = {}
        # '!=' replaces the removed '<>' operator
        if name != "yPel":
            return # ignore unknown tags
        self.yPels[attrs["name"]] = safeEval(attrs["value"])
|
||||||
|
|
164
Lib/fontTools/ttLib/tables/O_S_2f_2.py
Normal file
164
Lib/fontTools/ttLib/tables/O_S_2f_2.py
Normal file
@ -0,0 +1,164 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||||
|
|
||||||
|
# panose classification
|
||||||
|
|
||||||
|
panoseFormat = """
|
||||||
|
bFamilyType: B
|
||||||
|
bSerifStyle: B
|
||||||
|
bWeight: B
|
||||||
|
bProportion: B
|
||||||
|
bContrast: B
|
||||||
|
bStrokeVariation: B
|
||||||
|
bArmStyle: B
|
||||||
|
bLetterForm: B
|
||||||
|
bMidline: B
|
||||||
|
bXHeight: B
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Panose:
    # Container for the ten PANOSE classification bytes; attributes are
    # set dynamically from panoseFormat (by sstruct.unpack and fromXML).

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(panoseFormat)
        for name in names:
            writer.simpletag(name, value=getattr(self, name))
            writer.newline()

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); call sites are unchanged.
        name, attrs, content = element
        setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
|
||||||
|
|
||||||
|
# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
|
||||||
|
|
||||||
|
# sstruct format for the original (version 0) OS/2 table.
OS2_format_0 = """
> # big endian
version: H # version
xAvgCharWidth: h # average character width
usWeightClass: H # degree of thickness of strokes
usWidthClass: H # aspect ratio
fsType: h # type flags
ySubscriptXSize: h # subscript horizontal font size
ySubscriptYSize: h # subscript vertical font size
ySubscriptXOffset: h # subscript x offset
ySubscriptYOffset: h # subscript y offset
ySuperscriptXSize: h # superscript horizontal font size
ySuperscriptYSize: h # superscript vertical font size
ySuperscriptXOffset: h # superscript x offset
ySuperscriptYOffset: h # superscript y offset
yStrikeoutSize: h # strikeout size
yStrikeoutPosition: h # strikeout position
sFamilyClass: h # font family class and subclass
panose: 10s # panose classification number
ulUnicodeRange1: l # character range
ulUnicodeRange2: l # character range
ulUnicodeRange3: l # character range
ulUnicodeRange4: l # character range
achVendID: 4s # font vendor identification
fsSelection: H # font selection flags
fsFirstCharIndex: H # first unicode character index
fsLastCharIndex: H # last unicode character index
usTypoAscender: H # typographic ascender
usTypoDescender: H # typographic descender
usTypoLineGap: H # typographic line gap
usWinAscent: H # Windows ascender
usWinDescent: H # Windows descender
"""

# fields appended by version 1 of the table
OS2_format_1_addition = """
ulCodePageRange1: l
ulCodePageRange2: l
"""

# fields appended by version 2 (on top of the version 1 fields)
OS2_format_2_addition = OS2_format_1_addition + """
sxHeight: h
sCapHeight: h
usDefaultChar: H
usBreakChar: H
usMaxContex: H
"""

# endianness prefix needed when an "addition" format is unpacked alone
bigendian = "	>	# big endian\n"

# full formats for each version, built before the additions are
# rebound below with the endianness prefix prepended
OS2_format_1 = OS2_format_0 + OS2_format_1_addition
OS2_format_2 = OS2_format_0 + OS2_format_2_addition
OS2_format_1_addition = bigendian + OS2_format_1_addition
OS2_format_2_addition = bigendian + OS2_format_2_addition
|
||||||
|
|
||||||
|
|
||||||
|
class table_O_S_2f_2(DefaultTable.DefaultTable):

    """the OS/2 table"""

    def decompile(self, data, ttFont):
        # unpack the version 0 fields, then any version-specific extras
        dummy, data = sstruct.unpack2(OS2_format_0, data, self)
        if self.version == 1:
            sstruct.unpack(OS2_format_1_addition, data, self)
        elif self.version == 2:
            sstruct.unpack(OS2_format_2_addition, data, self)
        elif self.version != 0:
            from fontTools import ttLib
            # raise E(msg) spelling works on both Python 2 and 3
            raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
        # expand the raw 10-byte panose string into a Panose object
        self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())

    def compile(self, ttFont):
        # temporarily flatten the Panose object back to its packed
        # 10-byte form for sstruct.pack
        panose = self.panose
        self.panose = sstruct.pack(panoseFormat, panose)
        try:
            if self.version == 0:
                data = sstruct.pack(OS2_format_0, self)
            elif self.version == 1:
                data = sstruct.pack(OS2_format_1, self)
            elif self.version == 2:
                data = sstruct.pack(OS2_format_2, self)
            else:
                from fontTools import ttLib
                raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
        finally:
            # restore the Panose object even when packing fails, so the
            # table is not left in a half-flattened state
            self.panose = panose
        return data

    def toXML(self, writer, ttFont):
        if self.version == 1:
            format = OS2_format_1
        elif self.version == 2:
            format = OS2_format_2
        else:
            format = OS2_format_0
        formatstring, names, fixes = sstruct.getformat(format)
        for name in names:
            value = getattr(self, name)
            # Python 2 longs repr with a trailing 'L'; coerce to int for
            # clean XML output.  Spelled via the type name so this
            # module still parses under Python 3, where the 0L literal
            # is a syntax error and long does not exist (the check is
            # then simply never true).
            if type(value).__name__ == "long":
                value = int(value)
            if name == "panose":
                writer.begintag("panose")
                writer.newline()
                value.toXML(writer, ttFont)
                writer.endtag("panose")
            elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
                    "ulUnicodeRange3", "ulUnicodeRange4",
                    "ulCodePageRange1", "ulCodePageRange2"):
                writer.simpletag(name, value=num2binary(value))
            elif name in ("fsType", "fsSelection"):
                writer.simpletag(name, value=num2binary(value, 16))
            elif name == "achVendID":
                writer.simpletag(name, value=repr(value)[1:-1])
            else:
                writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); call sites are unchanged.
        name, attrs, content = element
        if name == "panose":
            self.panose = panose = Panose()
            # only tuple children are sub-elements; strings are
            # whitespace between tags
            for child in content:
                if isinstance(child, tuple):
                    panose.fromXML(child, ttFont)
        elif name in ("ulUnicodeRange1", "ulUnicodeRange2",
                "ulUnicodeRange3", "ulUnicodeRange4",
                "ulCodePageRange1", "ulCodePageRange2",
                "fsType", "fsSelection"):
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "achVendID":
            setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
        else:
            setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
|
||||||
|
|
45
Lib/fontTools/ttLib/tables/T_S_I__0.py
Normal file
45
Lib/fontTools/ttLib/tables/T_S_I__0.py
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import struct
|
||||||
|
|
||||||
|
# one TSI0 index record: glyphID (H), textLength (H), textOffset (l)
tsi0Format = '>HHl'

def fixlongs(index_entry):
    """Coerce the glyphID and textLength of a TSI0 index entry to int.

    struct.unpack may return Python 2 longs; coercing keeps comparisons
    and repr clean.  Tuple parameter unpacking was moved into the body
    (Python 3 forbids it in signatures); callers still pass one 3-tuple.
    """
    glyphID, textLength, textOffset = index_entry
    return int(glyphID), int(textLength), textOffset
|
||||||
|
|
||||||
|
|
||||||
|
class table_T_S_I__0(DefaultTable.DefaultTable):
    # TSI0: index for the TSI1 table (VTT private glyph program source).
    # Maps each glyph ID to a (textLength, textOffset) slice of TSI1,
    # followed by a magic separator record and four "extra" entries for
    # the shared programs (ppgm/cvt/reserved/fpgm).

    dependencies = ["TSI1"]

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        indices = []
        size = struct.calcsize(tsi0Format)
        for i in range(numGlyphs + 5):
            glyphID, textLength, textOffset = fixlongs(struct.unpack(tsi0Format, data[:size]))
            indices.append((glyphID, textLength, textOffset))
            data = data[size:]
        assert len(data) == 0
        # record -5 is the magic separator between the per-glyph
        # indices and the four extra-program indices
        assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
        self.indices = indices[:-5]
        self.extra_indices = indices[-4:]

    def compile(self, ttFont):
        # collect the packed records in a list and join once instead of
        # quadratic string concatenation
        chunks = []
        for index, textLength, textOffset in self.indices:
            chunks.append(struct.pack(tsi0Format, index, textLength, textOffset))
        # the magic separator record
        chunks.append(struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34))
        for index, textLength, textOffset in self.extra_indices:
            chunks.append(struct.pack(tsi0Format, index, textLength, textOffset))
        return "".join(chunks)

    def set(self, indices, extra_indices):
        # gets called by 'TSI1' or 'TSI3'
        self.indices = indices
        self.extra_indices = extra_indices

    def toXML(self, writer, ttFont):
        writer.comment("This table will be calculated by the compiler")
        writer.newline()
||||||
|
|
116
Lib/fontTools/ttLib/tables/T_S_I__1.py
Normal file
116
Lib/fontTools/ttLib/tables/T_S_I__1.py
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import string
|
||||||
|
|
||||||
|
class table_T_S_I__1(DefaultTable.DefaultTable):
    # TSI1: VTT's private table holding glyph programs as source text.
    # self.glyphPrograms maps glyph names to program text;
    # self.extraPrograms holds the shared programs keyed by the names
    # in self.extras.

    # extra (non-glyph) program slots, keyed by their magic index codes
    extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"}

    indextable = "TSI0"

    def decompile(self, data, ttFont):
        indextable = ttFont[self.indextable]
        self.glyphPrograms = {}
        for i in range(len(indextable.indices)):
            glyphID, textLength, textOffset = indextable.indices[i]
            if textLength == 0x8000:
                # Ugh. Hi Beat!  0x8000 means "look at the next entry
                # for the real length".
                textLength = indextable.indices[i+1][1]
            if textLength > 0x8000:
                pass # XXX Hmmm.
            text = data[textOffset:textOffset+textLength]
            assert len(text) == textLength
            if text:
                self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text

        self.extraPrograms = {}
        for i in range(len(indextable.extra_indices)):
            extraCode, textLength, textOffset = indextable.extra_indices[i]
            # BUGFIX: extraName was referenced below without ever being
            # assigned (NameError); resolve it from the extras mapping
            # up front.
            extraName = self.extras[extraCode]
            if textLength == 0x8000:
                if extraName == "fpgm": # this is the last one
                    textLength = len(data) - textOffset
                else:
                    textLength = indextable.extra_indices[i+1][1]
            text = data[textOffset:textOffset+textLength]
            assert len(text) == textLength
            if text:
                self.extraPrograms[extraName] = text

    def compile(self, ttFont):
        data = ''
        indextable = ttFont[self.indextable]
        glyphNames = ttFont.getGlyphOrder()

        indices = []
        for i in range(len(glyphNames)):
            if len(data) % 2:
                data = data + "\015" # align on 2-byte boundaries, fill with return chars. Yum.
            name = glyphNames[i]
            # 'in' replaces the removed dict.has_key()
            if name in self.glyphPrograms:
                text = self.glyphPrograms[name]
            else:
                text = ""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000 # XXX ???
            indices.append((i, textLength, len(data)))
            data = data + text

        extra_indices = []
        # sorted() replaces items()+sort()
        codes = sorted(self.extras.items())
        for i in range(len(codes)):
            if len(data) % 2:
                data = data + "\015" # align on 2-byte boundaries, fill with return chars.
            code, name = codes[i]
            if name in self.extraPrograms:
                text = self.extraPrograms[name]
            else:
                text = ""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000 # XXX ???
            extra_indices.append((code, textLength, len(data)))
            data = data + text
        # hand the freshly built indices to the TSI0/TSI2 index table
        indextable.set(indices, extra_indices)
        return data

    def toXML(self, writer, ttFont):
        # str methods replace the old string-module helpers throughout
        writer.newline()
        for name in sorted(self.glyphPrograms.keys()):
            text = self.glyphPrograms[name]
            if not text:
                continue
            writer.begintag("glyphProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("glyphProgram")
            writer.newline()
            writer.newline()
        for name in sorted(self.extraPrograms.keys()):
            text = self.extraPrograms[name]
            if not text:
                continue
            writer.begintag("extraProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("extraProgram")
            writer.newline()
            writer.newline()

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); call sites are unchanged.
        name, attrs, content = element
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        # rejoin the XML text content, normalize to \n, then convert
        # back to the \r line endings VTT uses; drop the first and last
        # (blank) lines produced by the surrounding tags
        lines = "".join(content).replace("\r", "\n").split("\n")
        text = "\r".join(lines[1:-1])
        if name == "glyphProgram":
            self.glyphPrograms[attrs["name"]] = text
        elif name == "extraProgram":
            self.extraPrograms[attrs["name"]] = text
|
||||||
|
|
8
Lib/fontTools/ttLib/tables/T_S_I__2.py
Normal file
8
Lib/fontTools/ttLib/tables/T_S_I__2.py
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
from fontTools import ttLib
|
||||||
|
|
||||||
|
# TSI2 has exactly the same structure as TSI0, so reuse its class
superclass = ttLib.getTableClass("TSI0")

class table_T_S_I__2(superclass):
    # TSI2: index for the TSI3 table; only the dependency differs.

    dependencies = ["TSI3"]
|
||||||
|
|
11
Lib/fontTools/ttLib/tables/T_S_I__3.py
Normal file
11
Lib/fontTools/ttLib/tables/T_S_I__3.py
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
from fontTools import ttLib
|
||||||
|
|
||||||
|
# TSI3 has exactly the same structure as TSI1, so reuse its class
superclass = ttLib.getTableClass("TSI1")

class table_T_S_I__3(superclass):
    # TSI3: VTT cvt program source.  Same layout as TSI1 but indexed by
    # TSI2, with all four extra slots reserved.

    extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"}

    indextable = "TSI2"
||||||
|
|
||||||
|
|
42
Lib/fontTools/ttLib/tables/T_S_I__5.py
Normal file
42
Lib/fontTools/ttLib/tables/T_S_I__5.py
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
|
||||||
|
class table_T_S_I__5(DefaultTable.DefaultTable):
    # TSI5: VTT glyph grouping — one 16-bit character-group number per
    # glyph, stored here as self.glyphGrouping: {glyphName: group}.

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        assert len(data) == 2 * numGlyphs
        a = array.array("H")
        a.fromstring(data)
        # table data is big-endian; swap on little-endian hosts
        # ('!=' replaces the removed '<>' operator)
        if ttLib.endian != "big":
            a.byteswap()
        self.glyphGrouping = {}
        for i in range(numGlyphs):
            self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]

    def compile(self, ttFont):
        glyphNames = ttFont.getGlyphOrder()
        a = array.array("H")
        for i in range(len(glyphNames)):
            a.append(self.glyphGrouping[glyphNames[i]])
        if ttLib.endian != "big":
            a.byteswap()
        return a.tostring()

    def toXML(self, writer, ttFont):
        # sorted() replaces keys()+sort() for stable output
        for glyphName in sorted(self.glyphGrouping.keys()):
            writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName])
            writer.newline()

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); call sites are unchanged.
        name, attrs, content = element
        if not hasattr(self, "glyphGrouping"):
            self.glyphGrouping = {}
        if name != "glyphgroup":
            return
        self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
|
||||||
|
|
1
Lib/fontTools/ttLib/tables/__init__.py
Normal file
1
Lib/fontTools/ttLib/tables/__init__.py
Normal file
@ -0,0 +1 @@
|
|||||||
|
# dummy file, so Python knows ttLib.tables is a subpackage
|
397
Lib/fontTools/ttLib/tables/_c_m_a_p.py
Normal file
397
Lib/fontTools/ttLib/tables/_c_m_a_p.py
Normal file
@ -0,0 +1,397 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import struct
|
||||||
|
import string
|
||||||
|
import array
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval, readHex
|
||||||
|
|
||||||
|
|
||||||
|
class table__c_m_a_p(DefaultTable.DefaultTable):
    # The character-to-glyph-index mapping table: a version word plus a
    # list of subtables, each tagged with a platform/encoding pair.

    def getcmap(self, platformID, platEncID):
        # Return the first subtable matching (platformID, platEncID).
        for subtable in self.tables:
            if (subtable.platformID == platformID and
                    subtable.platEncID == platEncID):
                return subtable
        return None # not found

    def decompile(self, data, ttFont):
        tableVersion, numSubTables = struct.unpack(">HH", data[:4])
        self.tableVersion = int(tableVersion)
        self.tables = tables = []
        for i in range(numSubTables):
            platformID, platEncID, offset = struct.unpack(
                    ">HHl", data[4+i*8:4+(i+1)*8])
            platformID, platEncID = int(platformID), int(platEncID)
            format, length = struct.unpack(">HH", data[offset:offset+4])
            # 'in' replaces the removed dict.has_key()
            if format not in cmap_classes:
                table = cmap_format_unknown(format)
            else:
                table = cmap_classes[format](format)
            table.platformID = platformID
            table.platEncID = platEncID
            table.decompile(data[offset:offset+int(length)], ttFont)
            tables.append(table)

    def compile(self, ttFont):
        self.tables.sort() # sort according to the spec; see CmapSubtable.__cmp__()
        numSubTables = len(self.tables)
        totalOffset = 4 + 8 * numSubTables
        data = struct.pack(">HH", self.tableVersion, numSubTables)
        tableData = ""
        done = {} # remember the data so we can reuse the "pointers"
        for table in self.tables:
            chunk = table.compile(ttFont)
            if chunk in done:
                offset = done[chunk]
            else:
                offset = done[chunk] = totalOffset + len(tableData)
                tableData = tableData + chunk
            data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
        return data + tableData

    def toXML(self, writer, ttFont):
        writer.simpletag("tableVersion", version=self.tableVersion)
        writer.newline()
        for table in self.tables:
            table.toXML(writer, ttFont)

    def fromXML(self, element, ttFont):
        # Tuple parameter unpacking moved into the body (removed in
        # Python 3); call sites are unchanged.
        name, attrs, content = element
        if name == "tableVersion":
            self.tableVersion = safeEval(attrs["version"])
            return
        if name[:12] != "cmap_format_":
            return
        if not hasattr(self, "tables"):
            self.tables = []
        # BUGFIX: use the whole numeric suffix, not just its first
        # character, so multi-digit subtable formats (12, 14, ...)
        # round-trip correctly.
        format = safeEval(name[12:])
        if format not in cmap_classes:
            table = cmap_format_unknown(format)
        else:
            table = cmap_classes[format](format)
        table.platformID = safeEval(attrs["platformID"])
        table.platEncID = safeEval(attrs["platEncID"])
        table.fromXML((name, attrs, content), ttFont)
        self.tables.append(table)
|
||||||
|
|
||||||
|
|
||||||
|
class CmapSubtable:
	
	"""Base class for cmap subtables.  Concrete subclasses implement
	decompile() and compile(); this base class supplies a raw hex-dump
	XML round trip and the sort order used by table__c_m_a_p.compile().
	"""
	
	def __init__(self, format):
		self.format = format
	
	def toXML(self, writer, ttFont):
		# Default XML representation: a hex dump of the compiled binary
		# (used by subtable classes that don't parse their contents).
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				])
		writer.newline()
		writer.dumphex(self.compile(ttFont))
		writer.endtag(self.__class__.__name__)
		writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		# Counterpart of the default toXML(): re-parse the hex dump.
		self.decompile(readHex(content), ttFont)
	
	def __cmp__(self, other):
		# implemented so that list.sort() sorts according to the cmap spec:
		# primarily platformID, then platEncID, then version; the trailing
		# __dict__ only makes the order deterministic for otherwise equal
		# subtables.
		selfTuple = (
			self.platformID,
			self.platEncID,
			self.version,
			self.__dict__)
		otherTuple = (
			other.platformID,
			other.platEncID,
			other.version,
			other.__dict__)
		return cmp(selfTuple, otherTuple)
||||||
|
class cmap_format_0(CmapSubtable):
	
	"""Format 0: byte encoding table.  A plain 256-entry array of byte
	glyph IDs; the whole subtable is exactly 262 bytes (6-byte header
	plus 256 entries).
	"""
	
	def decompile(self, data, ttFont):
		# Header: uint16 format, uint16 length, uint16 version.
		format, length, version = struct.unpack(">HHH", data[:6])
		self.version = int(version)
		assert len(data) == 262 == length
		glyphIdArray = array.array("B")
		glyphIdArray.fromstring(data[6:])
		self.cmap = cmap = {}
		for charCode in range(len(glyphIdArray)):
			cmap[charCode] = ttFont.getGlyphName(glyphIdArray[charCode])
	
	def compile(self, ttFont):
		charCodes = self.cmap.keys()
		charCodes.sort()
		# format 0 requires a dense mapping for char codes 0..255
		assert charCodes == range(256)  # charCodes[charCode] == charCode
		for charCode in charCodes:
			# reusing the charCodes list!
			charCodes[charCode] = ttFont.getGlyphID(self.cmap[charCode])
		glyphIdArray = array.array("B", charCodes)
		data = struct.pack(">HHH", 0, 262, self.version) + glyphIdArray.tostring()
		assert len(data) == 262
		return data
	
	def toXML(self, writer, ttFont):
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("version", self.version),
				])
		writer.newline()
		items = self.cmap.items()
		items.sort()
		for code, name in items:
			writer.simpletag("map", code=hex(code), name=name)
			writer.newline()
		writer.endtag(self.__class__.__name__)
		writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		self.version = safeEval(attrs["version"])
		self.cmap = {}
		for element in content:
			if type(element) <> type(()):
				continue	# skip character data between elements
			name, attrs, content = element
			if name <> "map":
				continue
			self.cmap[safeEval(attrs["code"])] = attrs["name"]
||||||
|
class cmap_format_2(CmapSubtable):
	
	"""Format 2: high-byte mapping through table (CJK encodings).
	Not actually parsed: only the header is read, the raw bytes are
	kept and written back verbatim.
	"""
	
	def decompile(self, data, ttFont):
		format, length, version = struct.unpack(">HHH", data[:6])
		self.version = int(version)
		self.data = data	# keep the entire subtable, header included
	
	def compile(self, ttFont):
		return self.data
||||||
|
# Struct format of the fixed-size part of a format 4 subtable:
# format, length, version, segCountX2, searchRange, entrySelector, rangeShift.
cmap_format_4_format = ">7H"

# The header is followed by these variable-size arrays:
#uint16  endCode[segCount]          # Ending character code for each segment, last = 0xFFFF.
#uint16  reservedPad                # This value should be zero
#uint16  startCode[segCount]        # Starting character code for each segment
#uint16  idDelta[segCount]          # Delta for all character codes in segment
#uint16  idRangeOffset[segCount]    # Offset in bytes to glyph indexArray, or 0
#uint16  glyphIndexArray[variable]  # Glyph index array
|
||||||
|
class cmap_format_4(CmapSubtable):
	
	"""Format 4: segment mapping to delta values; the standard 16-bit
	Unicode mapping.  Character ranges are stored as segments which map
	either through a constant glyphID delta or through an index into
	glyphIndexArray (see the layout comment above cmap_format_4_format).
	"""
	
	def decompile(self, data, ttFont):
		(format, length, self.version, segCountX2,
				searchRange, entrySelector, rangeShift) = \
				struct.unpack(cmap_format_4_format, data[:14])
		assert len(data) == length, "corrupt cmap table (%d, %d)" % (len(data), length)
		data = data[14:]
		segCountX2 = int(segCountX2)
		segCount = segCountX2 / 2
		
		# everything after the header is one big uint16 array
		allcodes = array.array("H")
		allcodes.fromstring(data)
		if ttLib.endian <> "big":
			allcodes.byteswap()	# file data is big endian
		
		# divide the data
		endCode = allcodes[:segCount]
		allcodes = allcodes[segCount+1:]	# the +1 skips reservedPad
		startCode = allcodes[:segCount]
		allcodes = allcodes[segCount:]
		idDelta = allcodes[:segCount]
		allcodes = allcodes[segCount:]
		idRangeOffset = allcodes[:segCount]
		glyphIndexArray = allcodes[segCount:]
		
		# build 2-byte character mapping
		cmap = {}
		for i in range(len(startCode) - 1):	# don't do 0xffff!
			for charCode in range(startCode[i], endCode[i] + 1):
				rangeOffset = idRangeOffset[i]
				if rangeOffset == 0:
					glyphID = charCode + idDelta[i]
				else:
					# *someone* needs to get killed.
					# The offset is defined in bytes relative to the address
					# of idRangeOffset[i]; translate that C pointer trick
					# into an index into our glyphIndexArray slice.
					index = idRangeOffset[i] / 2 + (charCode - startCode[i]) + i - len(idRangeOffset)
					if glyphIndexArray[index] <> 0: # if not missing glyph
						glyphID = glyphIndexArray[index] + idDelta[i]
					else:
						glyphID = 0 # missing glyph
				# delta arithmetic is modulo 65536
				cmap[charCode] = ttFont.getGlyphName(glyphID % 0x10000)
		self.cmap = cmap
	
	def compile(self, ttFont):
		from fontTools.ttLib.sfnt import maxpoweroftwo
		
		codes = self.cmap.items()
		codes.sort()
		
		# build startCode and endCode lists: split the sorted character
		# codes into maximal runs of consecutive codes
		last = codes[0][0]
		endCode = []
		startCode = [last]
		for charCode, glyphName in codes[1:]:	# skip the first code, it's the first start code
			if charCode == last + 1:
				last = charCode
				continue
			endCode.append(last)
			startCode.append(charCode)
			last = charCode
		endCode.append(last)
		startCode.append(0xffff)	# the spec requires a terminating 0xffff segment
		endCode.append(0xffff)
		
		# build up rest of cruft.
		idDelta = []
		idRangeOffset = []
		glyphIndexArray = []
		
		for i in range(len(endCode)-1):	# skip the closing codes (0xffff)
			indices = []
			for charCode in range(startCode[i], endCode[i]+1):
				indices.append(ttFont.getGlyphID(self.cmap[charCode]))
			if indices == range(indices[0], indices[0] + len(indices)):
				# glyph IDs are consecutive: encode the segment as a delta
				idDelta.append((indices[0] - startCode[i]) % 0x10000)
				idRangeOffset.append(0)
			else:
				# someone *definitely* needs to get killed.
				# Encode the byte offset from idRangeOffset[i] to this
				# segment's slice of glyphIndexArray.
				idDelta.append(0)
				idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
				glyphIndexArray = glyphIndexArray + indices
		idDelta.append(1)	# 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
		idRangeOffset.append(0)
		
		# Insane.  (These binary-search helper fields are required by the spec.)
		segCount = len(endCode)
		segCountX2 = segCount * 2
		maxexponent = maxpoweroftwo(segCount)
		searchRange = 2 * (2 ** maxexponent)
		entrySelector = maxexponent
		rangeShift = 2 * segCount - searchRange
		
		allcodes = array.array("H",
				endCode + [0] + startCode + idDelta + idRangeOffset + glyphIndexArray)
		if ttLib.endian <> "big":
			allcodes.byteswap()
		data = allcodes.tostring()
		length = struct.calcsize(cmap_format_4_format) + len(data)
		header = struct.pack(cmap_format_4_format, self.format, length, self.version,
				segCountX2, searchRange, entrySelector, rangeShift)
		return header + data
	
	def toXML(self, writer, ttFont):
		from fontTools.unicode import Unicode
		codes = self.cmap.items()
		codes.sort()
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("version", self.version),
				])
		writer.newline()
		
		for code, name in codes:
			writer.simpletag("map", code=hex(code), name=name)
			writer.comment(Unicode[code])	# annotate with the Unicode character name
			writer.newline()
		
		writer.endtag(self.__class__.__name__)
		writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		self.version = safeEval(attrs["version"])
		self.cmap = {}
		for element in content:
			if type(element) <> type(()):
				continue	# skip character data between elements
			name, attrs, content = element
			if name <> "map":
				continue
			self.cmap[safeEval(attrs["code"])] = attrs["name"]
||||||
|
class cmap_format_6(CmapSubtable):
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
format, length, version, firstCode, entryCount = struct.unpack(
|
||||||
|
">HHHHH", data[:10])
|
||||||
|
self.version = int(version)
|
||||||
|
firstCode = int(firstCode)
|
||||||
|
self.version = int(version)
|
||||||
|
data = data[10:]
|
||||||
|
assert len(data) == 2 * entryCount
|
||||||
|
glyphIndexArray = array.array("H")
|
||||||
|
glyphIndexArray.fromstring(data)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
glyphIndexArray.byteswap()
|
||||||
|
self.cmap = cmap = {}
|
||||||
|
for i in range(len(glyphIndexArray)):
|
||||||
|
glyphID = glyphIndexArray[i]
|
||||||
|
glyphName = ttFont.getGlyphName(glyphID)
|
||||||
|
cmap[i+firstCode] = glyphName
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
codes = self.cmap.keys()
|
||||||
|
codes.sort()
|
||||||
|
assert codes == range(codes[0], codes[0] + len(codes))
|
||||||
|
glyphIndexArray = array.array("H", [0] * len(codes))
|
||||||
|
firstCode = codes[0]
|
||||||
|
for i in range(len(codes)):
|
||||||
|
code = codes[i]
|
||||||
|
glyphIndexArray[code-firstCode] = ttFont.getGlyphID(self.cmap[code])
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
glyphIndexArray.byteswap()
|
||||||
|
data = glyphIndexArray.tostring()
|
||||||
|
header = struct.pack(">HHHHH",
|
||||||
|
6, len(data) + 10, self.version, firstCode, len(self.cmap))
|
||||||
|
return header + data
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
codes = self.cmap.items()
|
||||||
|
codes.sort()
|
||||||
|
writer.begintag(self.__class__.__name__, [
|
||||||
|
("platformID", self.platformID),
|
||||||
|
("platEncID", self.platEncID),
|
||||||
|
("version", self.version),
|
||||||
|
])
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
for code, name in codes:
|
||||||
|
writer.simpletag("map", code=hex(code), name=name)
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
writer.endtag(self.__class__.__name__)
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
self.version = safeEval(attrs["version"])
|
||||||
|
self.cmap = {}
|
||||||
|
for element in content:
|
||||||
|
if type(element) <> type(()):
|
||||||
|
continue
|
||||||
|
name, attrs, content = element
|
||||||
|
if name <> "map":
|
||||||
|
continue
|
||||||
|
self.cmap[safeEval(attrs["code"])] = attrs["name"]
|
||||||
|
|
||||||
|
|
||||||
|
class cmap_format_unknown(CmapSubtable):
	
	"""Fallback for subtable formats we have no parser for: the raw
	bytes are stored untouched and written back verbatim.  The base
	class's toXML()/fromXML() hex-dump round trip applies.
	"""
	
	def decompile(self, data, ttFont):
		self.data = data
	
	def compile(self, ttFont):
		return self.data
|
||||||
|
# Map subtable format number to its parser class; formats not listed
# here fall back to cmap_format_unknown (see table__c_m_a_p.decompile()
# and .fromXML()).
cmap_classes = {
		0: cmap_format_0,
		2: cmap_format_2,
		4: cmap_format_4,
		6: cmap_format_6,
		}
|
||||||
|
|
48
Lib/fontTools/ttLib/tables/_c_v_t.py
Normal file
48
Lib/fontTools/ttLib/tables/_c_v_t.py
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
class table__c_v_t(DefaultTable.DefaultTable):
	
	"""The 'cvt ' (control value) table: a bare array of signed 16-bit
	values, exposed through list-like indexing on the table object.
	"""
	
	def decompile(self, data, ttFont):
		values = array.array("h")
		values.fromstring(data)
		if ttLib.endian <> "big":
			values.byteswap()	# file data is big endian
		self.values = values
	
	def compile(self, ttFont):
		values = self.values[:]	# copy, so byteswap() doesn't clobber self.values
		if ttLib.endian <> "big":
			values.byteswap()
		return values.tostring()
	
	def toXML(self, writer, ttFont):
		for i in range(len(self.values)):
			value = self.values[i]
			writer.simpletag("cv", value=value, index=i)
			writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		if not hasattr(self, "values"):
			self.values = array.array("h")
		if name == "cv":
			index = safeEval(attrs["index"])
			value = safeEval(attrs["value"])
			# pad the array with zeros so values[index] exists, which
			# also tolerates <cv> elements arriving out of order
			for i in range(1 + index - len(self.values)):
				self.values.append(0)
			self.values[index] = value
	
	# sequence protocol, so the table object itself can be indexed
	def __len__(self):
		return len(self.values)
	
	def __getitem__(self, index):
		return self.values[index]
	
	def __setitem__(self, index, value):
		self.values[index] = value
	
	def __delitem__(self, index):
		del self.values[index]
|
14
Lib/fontTools/ttLib/tables/_f_p_g_m.py
Normal file
14
Lib/fontTools/ttLib/tables/_f_p_g_m.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
|
||||||
|
class table__f_p_g_m(DefaultTable.DefaultTable):
	
	"""The 'fpgm' (font program) table: raw TrueType instructions,
	stored and written back verbatim.
	"""
	
	def decompile(self, data, ttFont):
		self.fpgm = data
	
	def compile(self, ttFont):
		return self.fpgm
	
	def __len__(self):
		# length of the font program in bytes
		return len(self.fpgm)
|
46
Lib/fontTools/ttLib/tables/_g_a_s_p.py
Normal file
46
Lib/fontTools/ttLib/tables/_g_a_s_p.py
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import struct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
|
||||||
|
# Bit flags for rangeGaspBehavior in the 'gasp' table:
GASP_DOGRAY = 0x0002	# grayscale rendering recommended for the ppem range
GASP_GRIDFIT = 0x0001	# gridfitting (hinting) recommended for the ppem range
|
||||||
|
class table__g_a_s_p(DefaultTable.DefaultTable):
	
	"""The 'gasp' table: per-ppem-range grid-fitting/grayscale behavior.
	Stored as a dict: self.gaspRange[rangeMaxPPEM] = rangeGaspBehavior.
	"""
	
	def decompile(self, data, ttFont):
		self.version, numRanges = struct.unpack(">HH", data[:4])
		assert self.version == 0, "unknown 'gasp' format: %s" % self.version
		data = data[4:]
		self.gaspRange = {}
		for i in range(numRanges):
			# each record: uint16 rangeMaxPPEM, uint16 rangeGaspBehavior
			rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
			self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
			data = data[4:]
		assert not data, "too much data"
	
	def compile(self, ttFont):
		numRanges = len(self.gaspRange)
		# version is written as the literal 0: decompile() only accepts
		# version 0, and fromXML() never sets self.version.
		data = struct.pack(">HH", 0, numRanges)
		items = self.gaspRange.items()
		items.sort()	# records must be in increasing ppem order
		for rangeMaxPPEM, rangeGaspBehavior in items:
			data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
		return data
	
	def toXML(self, writer, ttFont):
		items = self.gaspRange.items()
		items.sort()
		for rangeMaxPPEM, rangeGaspBehavior in items:
			writer.simpletag("gaspRange", [
					("rangeMaxPPEM", rangeMaxPPEM),
					("rangeGaspBehavior", rangeGaspBehavior)])
			writer.newline()
	
	def fromXML(self, (name, attrs, content), ttFont):
		if name <> "gaspRange":
			return
		if not hasattr(self, "gaspRange"):
			self.gaspRange = {}
		self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
|
|
778
Lib/fontTools/ttLib/tables/_g_l_y_f.py
Normal file
778
Lib/fontTools/ttLib/tables/_g_l_y_f.py
Normal file
@ -0,0 +1,778 @@
|
|||||||
|
"""_g_l_y_f.py -- Converter classes for the 'glyf' table."""
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# The Apple and MS rasterizers behave differently for
|
||||||
|
# scaled composite components: one does scale first and then translate
|
||||||
|
# and the other does it vice versa. MS defined some flags to indicate
|
||||||
|
# the difference, but it seems nobody actually _sets_ those flags.
|
||||||
|
#
|
||||||
|
# Funny thing: Apple seems to _only_ do their thing in the
|
||||||
|
# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE
|
||||||
|
# (eg. Charcoal)...
|
||||||
|
#
|
||||||
|
SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple
|
||||||
|
|
||||||
|
|
||||||
|
import struct, sstruct
|
||||||
|
import DefaultTable
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval, readHex
|
||||||
|
import array
|
||||||
|
import Numeric
|
||||||
|
import types
|
||||||
|
|
||||||
|
class table__g_l_y_f(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
loca = ttFont['loca']
|
||||||
|
last = loca[0]
|
||||||
|
self.glyphs = {}
|
||||||
|
self.glyphOrder = []
|
||||||
|
self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
|
||||||
|
for i in range(0, len(loca)-1):
|
||||||
|
glyphName = glyphOrder[i]
|
||||||
|
next = loca[i+1]
|
||||||
|
glyphdata = data[last:next]
|
||||||
|
if len(glyphdata) <> (next - last):
|
||||||
|
raise ttLib.TTLibError, "not enough 'glyf' table data"
|
||||||
|
glyph = Glyph(glyphdata)
|
||||||
|
self.glyphs[glyphName] = glyph
|
||||||
|
last = next
|
||||||
|
if len(data) > next:
|
||||||
|
raise ttLib.TTLibError, "too much 'glyf' table data"
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
import string
|
||||||
|
locations = []
|
||||||
|
currentLocation = 0
|
||||||
|
dataList = []
|
||||||
|
for glyphName in ttFont.getGlyphOrder():
|
||||||
|
glyph = self[glyphName]
|
||||||
|
glyphData = glyph.compile(self)
|
||||||
|
locations.append(currentLocation)
|
||||||
|
currentLocation = currentLocation + len(glyphData)
|
||||||
|
dataList.append(glyphData)
|
||||||
|
locations.append(currentLocation)
|
||||||
|
data = string.join(dataList, "")
|
||||||
|
ttFont['loca'].set(locations)
|
||||||
|
ttFont['maxp'].numGlyphs = len(self.glyphs)
|
||||||
|
return data
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont, progress=None, compactGlyphs=0):
|
||||||
|
writer.newline()
|
||||||
|
glyphOrder = ttFont.getGlyphOrder()
|
||||||
|
writer.begintag("GlyphOrder")
|
||||||
|
writer.newline()
|
||||||
|
for i in range(len(glyphOrder)):
|
||||||
|
glyphName = glyphOrder[i]
|
||||||
|
writer.simpletag("GlyphID", id=i, name=glyphName)
|
||||||
|
writer.newline()
|
||||||
|
writer.endtag("GlyphOrder")
|
||||||
|
writer.newline()
|
||||||
|
writer.newline()
|
||||||
|
glyphNames = ttFont.getGlyphNames()
|
||||||
|
writer.comment("The xMin, yMin, xMax and yMax values\nwill be recalculated by the compiler.")
|
||||||
|
writer.newline()
|
||||||
|
writer.newline()
|
||||||
|
for glyphName in glyphNames:
|
||||||
|
if progress:
|
||||||
|
progress.setlabel("Dumping 'glyf' table... (%s)" % glyphName)
|
||||||
|
progress.increment()
|
||||||
|
glyph = self[glyphName]
|
||||||
|
if glyph.numberOfContours:
|
||||||
|
writer.begintag('TTGlyph', [
|
||||||
|
("name", glyphName),
|
||||||
|
("xMin", glyph.xMin),
|
||||||
|
("yMin", glyph.yMin),
|
||||||
|
("xMax", glyph.xMax),
|
||||||
|
("yMax", glyph.yMax),
|
||||||
|
])
|
||||||
|
writer.newline()
|
||||||
|
glyph.toXML(writer, ttFont)
|
||||||
|
if compactGlyphs:
|
||||||
|
glyph.compact(self)
|
||||||
|
writer.endtag('TTGlyph')
|
||||||
|
writer.newline()
|
||||||
|
else:
|
||||||
|
writer.simpletag('TTGlyph', name=glyphName)
|
||||||
|
writer.comment("contains no outline data")
|
||||||
|
writer.newline()
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
if name == "GlyphOrder":
|
||||||
|
glyphOrder = []
|
||||||
|
for element in content:
|
||||||
|
if type(element) == types.StringType:
|
||||||
|
continue
|
||||||
|
name, attrs, content = element
|
||||||
|
if name == "GlyphID":
|
||||||
|
index = safeEval(attrs["id"])
|
||||||
|
glyphName = attrs["name"]
|
||||||
|
glyphOrder = glyphOrder + (1 + index - len(glyphOrder)) * [".notdef"]
|
||||||
|
glyphOrder[index] = glyphName
|
||||||
|
ttFont.setGlyphOrder(glyphOrder)
|
||||||
|
elif name == "TTGlyph":
|
||||||
|
if not hasattr(self, "glyphs"):
|
||||||
|
self.glyphs = {}
|
||||||
|
glyphName = attrs["name"]
|
||||||
|
if ttFont.verbose:
|
||||||
|
ttLib.debugmsg("unpacking glyph '%s'" % glyphName)
|
||||||
|
glyph = Glyph()
|
||||||
|
for attr in ['xMin', 'yMin', 'xMax', 'yMax']:
|
||||||
|
setattr(glyph, attr, safeEval(attrs.get(attr, '0')))
|
||||||
|
self.glyphs[glyphName] = glyph
|
||||||
|
for element in content:
|
||||||
|
if type(element) == types.StringType:
|
||||||
|
continue
|
||||||
|
glyph.fromXML(element, ttFont)
|
||||||
|
|
||||||
|
def setGlyphOrder(self, glyphOrder):
|
||||||
|
self.glyphOrder = glyphOrder
|
||||||
|
|
||||||
|
def getGlyphName(self, glyphID):
|
||||||
|
return self.glyphOrder[glyphID]
|
||||||
|
|
||||||
|
def getGlyphID(self, glyphName):
|
||||||
|
# XXX optimize with reverse dict!!!
|
||||||
|
return self.glyphOrder.index(glyphName)
|
||||||
|
|
||||||
|
#def keys(self):
|
||||||
|
# return self.glyphOrder[:]
|
||||||
|
#
|
||||||
|
#def has_key(self, glyphName):
|
||||||
|
# return self.glyphs.has_key(glyphName)
|
||||||
|
#
|
||||||
|
def __getitem__(self, glyphName):
|
||||||
|
glyph = self.glyphs[glyphName]
|
||||||
|
glyph.expand(self)
|
||||||
|
return glyph
|
||||||
|
|
||||||
|
def __setitem__(self, glyphName, glyph):
|
||||||
|
self.glyphs[glyphName] = glyph
|
||||||
|
if glyphName not in self.glyphOrder:
|
||||||
|
self.glyphOrder.append(glyphName)
|
||||||
|
|
||||||
|
def __delitem__(self, glyphName):
|
||||||
|
del self.glyphs[glyphName]
|
||||||
|
self.glyphOrder.remove(glyphName)
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
assert len(self.glyphOrder) == len(self.glyphs)
|
||||||
|
return len(self.glyphs)
|
||||||
|
|
||||||
|
|
||||||
|
# sstruct format for the fixed header shared by simple and composite
# glyphs; numberOfContours == -1 marks a composite glyph.
glyphHeaderFormat = """
	>	# big endian
	numberOfContours:	h
	xMin:	h
	yMin:	h
	xMax:	h
	yMax:	h
"""

# flags for the points of simple (non-composite) glyphs
flagOnCurve = 0x01
flagXShort = 0x02
flagYShort = 0x04
flagRepeat = 0x08
flagXsame = 0x10
flagYsame = 0x20
flagReserved1 = 0x40
flagReserved2 = 0x80


# flags for composite glyph components
ARG_1_AND_2_ARE_WORDS = 0x0001  # if set args are words otherwise they are bytes
ARGS_ARE_XY_VALUES = 0x0002  # if set args are xy values, otherwise they are points
ROUND_XY_TO_GRID = 0x0004  # for the xy values if above is true
WE_HAVE_A_SCALE = 0x0008  # Sx = Sy, otherwise scale == 1.0
NON_OVERLAPPING = 0x0010  # set to same value for all components (obsolete!)
MORE_COMPONENTS = 0x0020  # indicates at least one more glyph after this one
WE_HAVE_AN_X_AND_Y_SCALE = 0x0040  # Sx, Sy
WE_HAVE_A_TWO_BY_TWO = 0x0080  # t00, t01, t10, t11
WE_HAVE_INSTRUCTIONS = 0x0100  # instructions follow
USE_MY_METRICS = 0x0200  # apply these metrics to parent glyph
OVERLAP_COMPOUND = 0x0400  # used by Apple in GX fonts
SCALED_COMPONENT_OFFSET = 0x0800  # composite designed to have the component offset scaled (designed for Apple)
UNSCALED_COMPONENT_OFFSET = 0x1000  # composite designed not to have the component offset scaled (designed for MS)
||||||
|
|
||||||
|
class Glyph:
|
||||||
|
|
||||||
|
def __init__(self, data=""):
|
||||||
|
if not data:
|
||||||
|
# empty char
|
||||||
|
self.numberOfContours = 0
|
||||||
|
return
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def compact(self, glyfTable):
	# Inverse of expand(): recompile to binary and drop every unpacked
	# attribute, to save memory.
	data = self.compile(glyfTable)
	self.__dict__.clear()
	self.data = data
|
||||||
|
def expand(self, glyfTable):
	if not hasattr(self, "data"):
		# already unpacked
		return
	# unpack the header fields (numberOfContours and the bounds)
	# directly into attributes on self
	dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
	del self.data
	if self.numberOfContours == -1:
		self.decompileComponents(data, glyfTable)
	else:
		self.decompileCoordinates(data)
|
|
||||||
|
def compile(self, glyfTable):
	if hasattr(self, "data"):
		# never expanded: the original binary is still valid
		return self.data
	if self.numberOfContours == 0:
		return ""	# empty glyphs occupy no space in the 'glyf' table
	self.recalcBounds(glyfTable)
	data = sstruct.pack(glyphHeaderFormat, self)
	if self.numberOfContours == -1:
		data = data + self.compileComponents(glyfTable)
	else:
		data = data + self.compileCoordinates()
	# from the spec: "Note that the local offsets should be word-aligned"
	if len(data) % 2:
		# ...so if the length of the data is odd, append a null byte
		data = data + "\0"
	return data
|
|
||||||
|
def toXML(self, writer, ttFont):
	if self.numberOfContours == -1:
		# composite glyph: dump the components, plus the instructions
		# when present
		for compo in self.components:
			compo.toXML(writer, ttFont)
		if hasattr(self, "instructions"):
			writer.begintag("instructions")
			writer.newline()
			writer.dumphex(self.instructions)
			writer.endtag("instructions")
			writer.newline()
	else:
		# simple glyph: one <contour> element per contour, each point
		# as a <pt> with its on-curve flag
		last = 0
		for i in range(self.numberOfContours):
			writer.begintag("contour")
			writer.newline()
			for j in range(last, self.endPtsOfContours[i] + 1):
				writer.simpletag("pt", [
						("x", self.coordinates[j][0]),
						("y", self.coordinates[j][1]),
						("on", self.flags[j] & flagOnCurve)])
				writer.newline()
			last = self.endPtsOfContours[i] + 1
			writer.endtag("contour")
			writer.newline()
		if self.numberOfContours:
			writer.begintag("instructions")
			writer.newline()
			writer.dumphex(self.instructions)
			writer.endtag("instructions")
			writer.newline()
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
	# Called once per child element of <TTGlyph>; accumulates state on
	# self, so contours/components may arrive across multiple calls.
	if name == "contour":
		self.numberOfContours = self.numberOfContours + 1
		if self.numberOfContours < 0:
			raise ttLib.TTLibError, "can't mix composites and contours in glyph"
		coordinates = []
		flags = []
		for element in content:
			if type(element) == types.StringType:
				continue	# skip character data
			name, attrs, content = element
			if name <> "pt":
				continue # ignore anything but "pt"
			coordinates.append([safeEval(attrs["x"]), safeEval(attrs["y"])])
			flags.append(not not safeEval(attrs["on"]))
		coordinates = Numeric.array(coordinates, Numeric.Int16)
		flags = Numeric.array(flags, Numeric.Int8)
		if not hasattr(self, "coordinates"):
			# first contour of the glyph
			self.coordinates = coordinates
			self.flags = flags
			self.endPtsOfContours = [len(coordinates)-1]
		else:
			# append to the contours seen so far
			self.coordinates = Numeric.concatenate((self.coordinates, coordinates))
			self.flags = Numeric.concatenate((self.flags, flags))
			self.endPtsOfContours.append(len(self.coordinates)-1)
	elif name == "component":
		if self.numberOfContours > 0:
			raise ttLib.TTLibError, "can't mix composites and contours in glyph"
		self.numberOfContours = -1	# mark as composite
		if not hasattr(self, "components"):
			self.components = []
		component = GlyphComponent()
		self.components.append(component)
		component.fromXML((name, attrs, content), ttFont)
	elif name == "instructions":
		self.instructions = readHex(content)
|
|
||||||
|
def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
	# Recursively gather the totals a composite glyph contributes to
	# the 'maxp' table.  Returns (nPoints, nContours, maxComponentDepth).
	assert self.numberOfContours == -1
	nContours = 0
	nPoints = 0
	for compo in self.components:
		baseGlyph = glyfTable[compo.glyphName]
		if baseGlyph.numberOfContours == 0:
			continue	# empty component contributes nothing
		elif baseGlyph.numberOfContours > 0:
			nP, nC = baseGlyph.getMaxpValues()
		else:
			# nested composite: recurse one level deeper
			nP, nC, maxComponentDepth = baseGlyph.getCompositeMaxpValues(
					glyfTable, maxComponentDepth + 1)
		nPoints = nPoints + nP
		nContours = nContours + nC
	return nPoints, nContours, maxComponentDepth
|
|
||||||
|
def getMaxpValues(self):
	# Point and contour counts of a simple glyph, for the 'maxp' table.
	assert self.numberOfContours > 0
	return len(self.coordinates), len(self.endPtsOfContours)
|
|
||||||
|
def decompileComponents(self, data, glyfTable):
	# Parse composite glyph data: a chain of components (each flags
	# whether MORE_COMPONENTS follow), optionally trailed by instructions.
	self.components = []
	more = 1
	haveInstructions = 0
	while more:
		component = GlyphComponent()
		more, haveInstr, data = component.decompile(data, glyfTable)
		haveInstructions = haveInstructions | haveInstr
		self.components.append(component)
	if haveInstructions:
		numInstructions, = struct.unpack(">h", data[:2])
		data = data[2:]
		self.instructions = data[:numInstructions]
		data = data[numInstructions:]
	# allow one leftover byte: the word-alignment pad (see compile())
	assert len(data) in (0, 1), "bad composite data"
|
|
||||||
|
def decompileCoordinates(self, data):
    """Parse a simple glyph's outline data.

    Sets self.endPtsOfContours, self.instructions, self.coordinates
    (absolute, as a Numeric (n, 2) Int16 array) and self.flags (only
    the on-curve bit is retained).
    """
    # contour end points: numberOfContours big-endian shorts
    endPtsOfContours = array.array("h")
    endPtsOfContours.fromstring(data[:2*self.numberOfContours])
    if ttLib.endian <> "big":
        endPtsOfContours.byteswap()
    self.endPtsOfContours = endPtsOfContours.tolist()

    data = data[2*self.numberOfContours:]

    # instruction block: length-prefixed
    instructionLength, = struct.unpack(">h", data[:2])
    data = data[2:]
    self.instructions = data[:instructionLength]
    data = data[instructionLength:]
    # last end point index + 1 == total number of points
    nCoordinates = self.endPtsOfContours[-1] + 1
    flags, xCoordinates, yCoordinates = \
            self.decompileCoordinatesRaw(nCoordinates, data)

    # fill in repetitions and apply signs
    coordinates = Numeric.zeros((nCoordinates, 2), Numeric.Int16)
    xIndex = 0
    yIndex = 0
    for i in range(nCoordinates):
        flag = flags[i]
        # x coordinate: short (unsigned byte + sign bit), same-as-previous
        # (delta 0), or full signed short
        if flag & flagXShort:
            if flag & flagXsame:
                x = xCoordinates[xIndex]
            else:
                x = -xCoordinates[xIndex]
            xIndex = xIndex + 1
        elif flag & flagXsame:
            x = 0
        else:
            x = xCoordinates[xIndex]
            xIndex = xIndex + 1
        # y coordinate: same scheme as x
        if flag & flagYShort:
            if flag & flagYsame:
                y = yCoordinates[yIndex]
            else:
                y = -yCoordinates[yIndex]
            yIndex = yIndex + 1
        elif flag & flagYsame:
            y = 0
        else:
            y = yCoordinates[yIndex]
            yIndex = yIndex + 1
        coordinates[i] = (x, y)
    # every raw coordinate must have been consumed
    assert xIndex == len(xCoordinates)
    assert yIndex == len(yCoordinates)
    # convert relative to absolute coordinates
    self.coordinates = Numeric.add.accumulate(coordinates)
    # discard all flags but for "flagOnCurve"
    if hasattr(Numeric, "__version__"):
        self.flags = Numeric.bitwise_and(flags, flagOnCurve).astype(Numeric.Int8)
    else:
        # older Numeric releases spelled the ufunc differently
        self.flags = Numeric.boolean_and(flags, flagOnCurve).astype(Numeric.Int8)
|
||||||
|
|
||||||
|
def decompileCoordinatesRaw(self, nCoordinates, data):
    """Expand the run-length-encoded flag bytes and unpack the raw
    (still signless, still relative) x and y coordinate arrays.

    Returns (flags, xCoordinates, yCoordinates).
    """
    # unpack flags and prepare unpacking of coordinates
    flags = Numeric.array([0] * nCoordinates, Numeric.Int8)
    # Warning: deep Python trickery going on. We use the struct module to unpack
    # the coordinates. We build a format string based on the flags, so we can
    # unpack the coordinates in one struct.unpack() call.
    xFormat = ">"  # big endian
    yFormat = ">"  # big endian
    i = j = 0
    while 1:
        flag = ord(data[i])
        i = i + 1
        repeat = 1
        if flag & flagRepeat:
            # next byte is a repeat count for this flag
            repeat = ord(data[i]) + 1
            i = i + 1
        for k in range(repeat):
            # each flag adds 'B' (unsigned byte), 'h' (signed short),
            # or nothing (same-as-previous) to the format strings
            if flag & flagXShort:
                xFormat = xFormat + 'B'
            elif not (flag & flagXsame):
                xFormat = xFormat + 'h'
            if flag & flagYShort:
                yFormat = yFormat + 'B'
            elif not (flag & flagYsame):
                yFormat = yFormat + 'h'
            flags[j] = flag
            j = j + 1
        if j >= nCoordinates:
            break
    assert j == nCoordinates, "bad glyph flags"
    data = data[i:]
    # unpack raw coordinates, krrrrrr-tching!
    xDataLen = struct.calcsize(xFormat)
    yDataLen = struct.calcsize(yFormat)
    # allow a single trailing pad byte, nothing more
    if (len(data) - (xDataLen + yDataLen)) not in (0, 1):
        raise ttLib.TTLibError, "bad glyph record"
    xCoordinates = struct.unpack(xFormat, data[:xDataLen])
    yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen])
    return flags, xCoordinates, yCoordinates
|
||||||
|
|
||||||
|
def compileComponents(self, glyfTable):
    """Serialize a composite glyph's component list (and optional
    instructions) to binary 'glyf' data."""
    data = ""
    lastcomponent = len(self.components) - 1
    more = 1
    haveInstructions = 0
    for i in range(len(self.components)):
        if i == lastcomponent:
            # only the final component record carries the
            # WE_HAVE_INSTRUCTIONS / clears the MORE_COMPONENTS flag
            haveInstructions = hasattr(self, "instructions")
            more = 0
        compo = self.components[i]
        data = data + compo.compile(more, haveInstructions, glyfTable)
    if haveInstructions:
        # length-prefixed instruction block follows the last component
        data = data + struct.pack(">h", len(self.instructions)) + self.instructions
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def compileCoordinates(self):
    """Serialize a simple glyph's outline: end points, instructions,
    run-length-compressed flags and delta-encoded coordinates."""
    assert len(self.coordinates) == len(self.flags)
    data = ""
    endPtsOfContours = array.array("h", self.endPtsOfContours)
    if ttLib.endian <> "big":
        endPtsOfContours.byteswap()
    data = data + endPtsOfContours.tostring()
    data = data + struct.pack(">h", len(self.instructions))
    data = data + self.instructions
    nCoordinates = len(self.coordinates)

    # make a copy
    coordinates = self.coordinates.astype(self.coordinates.typecode())
    # absolute to relative coordinates
    coordinates[1:] = Numeric.subtract(coordinates[1:], coordinates[:-1])
    flags = self.flags
    compressedflags = []
    xPoints = []
    yPoints = []
    xFormat = ">"
    yFormat = ">"
    lastflag = None
    repeat = 0
    for i in range(len(coordinates)):
        # Oh, the horrors of TrueType
        flag = self.flags[i]
        x, y = coordinates[i]
        # do x: pick the smallest encoding (omitted / unsigned byte +
        # sign flag / signed short) and set the matching flag bits
        if x == 0:
            flag = flag | flagXsame
        elif -255 <= x <= 255:
            flag = flag | flagXShort
            if x > 0:
                # positive byte delta: XShort + Xsame means "positive"
                flag = flag | flagXsame
            else:
                x = -x
            xPoints.append(x)
            xFormat = xFormat + 'B'
        else:
            xPoints.append(x)
            xFormat = xFormat + 'h'
        # do y: same scheme as x
        if y == 0:
            flag = flag | flagYsame
        elif -255 <= y <= 255:
            flag = flag | flagYShort
            if y > 0:
                flag = flag | flagYsame
            else:
                y = -y
            yPoints.append(y)
            yFormat = yFormat + 'B'
        else:
            yPoints.append(y)
            yFormat = yFormat + 'h'
        # handle repeating flags: a run of identical flags is stored as
        # flag|flagRepeat followed by a repeat-count byte
        if flag == lastflag:
            repeat = repeat + 1
            if repeat == 1:
                # placeholder byte, rewritten to the count on next repeat
                compressedflags.append(flag)
            elif repeat > 1:
                compressedflags[-2] = flag | flagRepeat
                compressedflags[-1] = repeat
            else:
                compressedflags[-1] = repeat
        else:
            repeat = 0
            compressedflags.append(flag)
        lastflag = flag
    data = data + array.array("B", compressedflags).tostring()
    data = data + apply(struct.pack, (xFormat,)+tuple(xPoints))
    data = data + apply(struct.pack, (yFormat,)+tuple(yPoints))
    return data
|
||||||
|
|
||||||
|
def recalcBounds(self, glyfTable):
    """Recompute xMin/yMin/xMax/yMax from the (possibly composite)
    glyph's flattened coordinates."""
    coordinates, endPts, flags = self.getCoordinates(glyfTable)
    self.xMin, self.yMin = Numeric.minimum.reduce(coordinates)
    self.xMax, self.yMax = Numeric.maximum.reduce(coordinates)
|
||||||
|
|
||||||
|
def getCoordinates(self, glyfTable):
    """Return (coordinates, endPtsOfContours, flags) for this glyph.

    Simple glyphs return their stored data directly; composites are
    flattened recursively, applying each component's offset/anchor and
    optional 2x2 transform.  Empty glyphs return empty arrays.
    """
    if self.numberOfContours > 0:
        return self.coordinates, self.endPtsOfContours, self.flags
    elif self.numberOfContours == -1:
        # it's a composite
        allCoords = None
        allFlags = None
        allEndPts = None
        for compo in self.components:
            g = glyfTable[compo.glyphName]
            coordinates, endPts, flags = g.getCoordinates(glyfTable)
            if hasattr(compo, "firstpt"):
                # move according to two reference points
                move = allCoords[compo.firstpt] - coordinates[compo.secondpt]
            else:
                move = compo.x, compo.y

            if not hasattr(compo, "transform"):
                coordinates = coordinates + move  # I love NumPy!
            else:
                # Apple and MS disagree on whether the offset is scaled;
                # honor the explicit flag if present, else the default
                apple_way = compo.flags & SCALED_COMPONENT_OFFSET
                ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
                assert not (apple_way and ms_way)
                if not (apple_way or ms_way):
                    scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT  # see top of this file
                else:
                    scale_component_offset = apple_way
                if scale_component_offset:
                    # the Apple way: first move, then scale (ie. scale the component offset)
                    coordinates = coordinates + move
                    coordinates = Numeric.dot(coordinates, compo.transform)
                else:
                    # the MS way: first scale, then move
                    coordinates = Numeric.dot(coordinates, compo.transform)
                    coordinates = coordinates + move
                # due to the transformation the coords. are now floats;
                # round them off nicely, and cast to short
                coordinates = Numeric.floor(coordinates + 0.5).astype(Numeric.Int16)
            if allCoords is None:
                allCoords = coordinates
                allEndPts = endPts
                allFlags = flags
            else:
                # shift the component's end point indices past the
                # points already accumulated
                allEndPts = allEndPts + (Numeric.array(endPts) + len(allCoords)).tolist()
                allCoords = Numeric.concatenate((allCoords, coordinates))
                allFlags = Numeric.concatenate((allFlags, flags))
        return allCoords, allEndPts, allFlags
    else:
        return Numeric.array([], Numeric.Int16), [], Numeric.array([], Numeric.Int8)
|
||||||
|
|
||||||
|
def __cmp__(self, other):
    """Equality-oriented comparison; returns 0 when the two glyphs
    match, a nonzero value otherwise (ordering is not meaningful).

    Needed because Numeric arrays do not compare usefully via
    __dict__ comparison.
    """
    if self.numberOfContours <= 0:
        # empty or composite glyph: plain attribute comparison suffices
        return cmp(self.__dict__, other.__dict__)
    else:
        if cmp(len(self.coordinates), len(other.coordinates)):
            return 1
        # element-wise array comparison, reduced to a single boolean
        ctest = Numeric.alltrue(Numeric.alltrue(Numeric.equal(self.coordinates, other.coordinates)))
        ftest = Numeric.alltrue(Numeric.equal(self.flags, other.flags))
        if not ctest or not ftest:
            return 1
        return (
            cmp(self.endPtsOfContours, other.endPtsOfContours) or
            cmp(self.instructions, other.instructions)
        )
|
||||||
|
|
||||||
|
|
||||||
|
class GlyphComponent:
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def decompile(self, data, glyfTable):
|
||||||
|
flags, glyphID = struct.unpack(">HH", data[:4])
|
||||||
|
self.flags = int(flags)
|
||||||
|
glyphID = int(glyphID)
|
||||||
|
self.glyphName = glyfTable.getGlyphName(int(glyphID))
|
||||||
|
#print ">>", reprflag(self.flags)
|
||||||
|
data = data[4:]
|
||||||
|
|
||||||
|
if self.flags & ARG_1_AND_2_ARE_WORDS:
|
||||||
|
if self.flags & ARGS_ARE_XY_VALUES:
|
||||||
|
self.x, self.y = struct.unpack(">hh", data[:4])
|
||||||
|
else:
|
||||||
|
x, y = struct.unpack(">HH", data[:4])
|
||||||
|
self.firstpt, self.secondpt = int(x), int(y)
|
||||||
|
data = data[4:]
|
||||||
|
else:
|
||||||
|
if self.flags & ARGS_ARE_XY_VALUES:
|
||||||
|
self.x, self.y = struct.unpack(">bb", data[:2])
|
||||||
|
else:
|
||||||
|
x, y = struct.unpack(">BB", data[:4])
|
||||||
|
self.firstpt, self.secondpt = int(x), int(y)
|
||||||
|
data = data[2:]
|
||||||
|
|
||||||
|
if self.flags & WE_HAVE_A_SCALE:
|
||||||
|
scale, = struct.unpack(">h", data[:2])
|
||||||
|
self.transform = Numeric.array(
|
||||||
|
[[scale, 0], [0, scale]]) / float(0x4000) # fixed 2.14
|
||||||
|
data = data[2:]
|
||||||
|
elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
|
||||||
|
xscale, yscale = struct.unpack(">hh", data[:4])
|
||||||
|
self.transform = Numeric.array(
|
||||||
|
[[xscale, 0], [0, yscale]]) / float(0x4000) # fixed 2.14
|
||||||
|
data = data[4:]
|
||||||
|
elif self.flags & WE_HAVE_A_TWO_BY_TWO:
|
||||||
|
(xscale, scale01,
|
||||||
|
scale10, yscale) = struct.unpack(">hhhh", data[:8])
|
||||||
|
self.transform = Numeric.array(
|
||||||
|
[[xscale, scale01], [scale10, yscale]]) / float(0x4000) # fixed 2.14
|
||||||
|
data = data[8:]
|
||||||
|
more = self.flags & MORE_COMPONENTS
|
||||||
|
haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
|
||||||
|
self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
|
||||||
|
SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
|
||||||
|
NON_OVERLAPPING)
|
||||||
|
return more, haveInstructions, data
|
||||||
|
|
||||||
|
def compile(self, more, haveInstructions, glyfTable):
|
||||||
|
data = ""
|
||||||
|
|
||||||
|
# reset all flags we will calculate ourselves
|
||||||
|
flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS |
|
||||||
|
SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
|
||||||
|
NON_OVERLAPPING)
|
||||||
|
if more:
|
||||||
|
flags = flags | MORE_COMPONENTS
|
||||||
|
if haveInstructions:
|
||||||
|
flags = flags | WE_HAVE_INSTRUCTIONS
|
||||||
|
|
||||||
|
if hasattr(self, "firstpt"):
|
||||||
|
if (0 <= self.firstpt <= 255) and (0 <= self.secondpt <= 255):
|
||||||
|
data = data + struct.pack(">BB", self.firstpt, self.secondpt)
|
||||||
|
else:
|
||||||
|
data = data + struct.pack(">HH", self.firstpt, self.secondpt)
|
||||||
|
flags = flags | ARG_1_AND_2_ARE_WORDS
|
||||||
|
else:
|
||||||
|
flags = flags | ARGS_ARE_XY_VALUES
|
||||||
|
if (-128 <= self.x <= 127) and (-128 <= self.y <= 127):
|
||||||
|
data = data + struct.pack(">bb", self.x, self.y)
|
||||||
|
else:
|
||||||
|
data = data + struct.pack(">hh", self.x, self.y)
|
||||||
|
flags = flags | ARG_1_AND_2_ARE_WORDS
|
||||||
|
|
||||||
|
if hasattr(self, "transform"):
|
||||||
|
# XXX needs more testing
|
||||||
|
transform = Numeric.floor(self.transform * 0x4000 + 0.5)
|
||||||
|
if transform[0][1] or transform[1][0]:
|
||||||
|
flags = flags | WE_HAVE_A_TWO_BY_TWO
|
||||||
|
data = data + struct.pack(">hhhh",
|
||||||
|
transform[0][0], transform[0][1],
|
||||||
|
transform[1][0], transform[1][1])
|
||||||
|
elif transform[0][0] <> transform[1][1]:
|
||||||
|
flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
|
||||||
|
data = data + struct.pack(">hh",
|
||||||
|
transform[0][0], transform[1][1])
|
||||||
|
else:
|
||||||
|
flags = flags | WE_HAVE_A_SCALE
|
||||||
|
data = data + struct.pack(">h",
|
||||||
|
transform[0][0])
|
||||||
|
|
||||||
|
glyphID = glyfTable.getGlyphID(self.glyphName)
|
||||||
|
return struct.pack(">HH", flags, glyphID) + data
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
attrs = [("glyphName", self.glyphName)]
|
||||||
|
if not hasattr(self, "firstpt"):
|
||||||
|
attrs = attrs + [("x", self.x), ("y", self.y)]
|
||||||
|
else:
|
||||||
|
attrs = attrs + [("firstpt", self.firstpt), ("secondpt", self.secondpt)]
|
||||||
|
|
||||||
|
if hasattr(self, "transform"):
|
||||||
|
# XXX needs more testing
|
||||||
|
transform = self.transform
|
||||||
|
if transform[0][1] or transform[1][0]:
|
||||||
|
attrs = attrs + [
|
||||||
|
("scalex", transform[0][0]), ("scale01", transform[0][1]),
|
||||||
|
("scale10", transform[1][0]), ("scaley", transform[1][1]),
|
||||||
|
]
|
||||||
|
elif transform[0][0] <> transform[1][1]:
|
||||||
|
attrs = attrs + [
|
||||||
|
("scalex", transform[0][0]), ("scaley", transform[1][1]),
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
attrs = attrs + [("scale", transform[0][0])]
|
||||||
|
attrs = attrs + [("flags", hex(self.flags))]
|
||||||
|
writer.simpletag("component", attrs)
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
self.glyphName = attrs["glyphName"]
|
||||||
|
if attrs.has_key("firstpt"):
|
||||||
|
self.firstpt = safeEval(attrs["firstpt"])
|
||||||
|
self.secondpt = safeEval(attrs["secondpt"])
|
||||||
|
else:
|
||||||
|
self.x = safeEval(attrs["x"])
|
||||||
|
self.y = safeEval(attrs["y"])
|
||||||
|
if attrs.has_key("scale01"):
|
||||||
|
scalex = safeEval(attrs["scalex"])
|
||||||
|
scale01 = safeEval(attrs["scale01"])
|
||||||
|
scale10 = safeEval(attrs["scale10"])
|
||||||
|
scaley = safeEval(attrs["scaley"])
|
||||||
|
self.transform = Numeric.array([[scalex, scale01], [scale10, scaley]])
|
||||||
|
elif attrs.has_key("scalex"):
|
||||||
|
scalex = safeEval(attrs["scalex"])
|
||||||
|
scaley = safeEval(attrs["scaley"])
|
||||||
|
self.transform = Numeric.array([[scalex, 0], [0, scaley]])
|
||||||
|
elif attrs.has_key("scale"):
|
||||||
|
scale = safeEval(attrs["scale"])
|
||||||
|
self.transform = Numeric.array([[scale, 0], [0, scale]])
|
||||||
|
self.flags = safeEval(attrs["flags"])
|
||||||
|
|
||||||
|
def __cmp__(self, other):
|
||||||
|
if hasattr(self, "transform"):
|
||||||
|
if Numeric.alltrue(Numeric.equal(self.transform, other.transform)):
|
||||||
|
selfdict = self.__dict__.copy()
|
||||||
|
otherdict = other.__dict__.copy()
|
||||||
|
del selfdict["transform"]
|
||||||
|
del otherdict["transform"]
|
||||||
|
return cmp(selfdict, otherdict)
|
||||||
|
else:
|
||||||
|
return 1
|
||||||
|
else:
|
||||||
|
return cmp(self.__dict__, other.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
def reprflag(flag):
    """Render *flag* (an int or a single character) as a binary string,
    zero-padded on the left to at least 14 digits."""
    if type(flag) == str:
        flag = ord(flag)
    bits = ""
    while flag:
        if flag & 0x01:
            bits = "1" + bits
        else:
            bits = "0" + bits
        flag = flag >> 1
    bits = (14 - len(bits)) * "0" + bits
    return bits
|
||||||
|
|
95
Lib/fontTools/ttLib/tables/_h_d_m_x.py
Normal file
95
Lib/fontTools/ttLib/tables/_h_d_m_x.py
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
import string
|
||||||
|
|
||||||
|
hdmxHeaderFormat = """
|
||||||
|
version: H
|
||||||
|
numRecords: H
|
||||||
|
recordSize: l
|
||||||
|
"""
|
||||||
|
|
||||||
|
class table__h_d_m_x(DefaultTable.DefaultTable):
    """The 'hdmx' (horizontal device metrics) table.

    self.hdmx maps ppem size -> {glyphName: advance width in pixels}.
    """

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
        self.hdmx = {}
        for i in range(self.numRecords):
            # each record: ppem byte, maxWidth byte, then one width
            # byte per glyph, padded to self.recordSize
            ppem = ord(data[0])
            maxSize = ord(data[1])
            widths = {}
            for glyphID in range(numGlyphs):
                widths[glyphOrder[glyphID]] = ord(data[glyphID+2])
            self.hdmx[ppem] = widths
            data = data[self.recordSize:]
        assert len(data) == 0, "too much hdmx data"

    def compile(self, ttFont):
        self.version = 0
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        # records are padded to a 4-byte boundary (py2 integer division)
        self.recordSize = 4 * ((2 + numGlyphs + 3) / 4)
        pad = (self.recordSize - 2 - numGlyphs) * "\0"
        self.numRecords = len(self.hdmx)
        data = sstruct.pack(hdmxHeaderFormat, self)
        items = self.hdmx.items()
        items.sort()
        for ppem, widths in items:
            # record header: ppem and the maximum width in this record
            data = data + chr(ppem) + chr(max(widths.values()))
            for glyphID in range(len(glyphOrder)):
                width = widths[glyphOrder[glyphID]]
                data = data + chr(width)
            data = data + pad
        return data

    def toXML(self, writer, ttFont):
        # emitted as a fixed-width text matrix: one column per ppem,
        # one row per glyph
        writer.begintag("hdmxData")
        writer.newline()
        ppems = self.hdmx.keys()
        ppems.sort()
        records = []
        format = ""
        for ppem in ppems:
            widths = self.hdmx[ppem]
            records.append(widths)
            format = format + "%4d"
        glyphNames = ttFont.getGlyphOrder()[:]
        glyphNames.sort()
        maxNameLen = max(map(len, glyphNames))
        format = "%" + `maxNameLen` + 's:' + format + ' ;'
        writer.write(format % (("ppem",) + tuple(ppems)))
        writer.newline()
        writer.newline()
        for glyphName in glyphNames:
            row = []
            for ppem in ppems:
                widths = self.hdmx[ppem]
                row.append(widths[glyphName])
            writer.write(format % ((glyphName,) + tuple(row)))
            writer.newline()
        writer.endtag("hdmxData")
        writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        if name <> "hdmxData":
            return
        # parse the text matrix written by toXML back into self.hdmx
        content = string.join(content, " ")
        lines = string.split(content, ";")
        topRow = string.split(lines[0])
        assert topRow[0] == "ppem:", "illegal hdmx format"
        ppems = map(int, topRow[1:])
        self.hdmx = hdmx = {}
        for ppem in ppems:
            hdmx[ppem] = {}
        lines = map(string.split, lines[1:])
        for line in lines:
            if not line:
                continue
            assert line[0][-1] == ":", "illegal hdmx format"
            glyphName = line[0][:-1]
            line = map(int, line[1:])
            assert len(line) == len(ppems), "illegal hdmx format"
            for i in range(len(ppems)):
                hdmx[ppems[i]][glyphName] = line[i]
|
||||||
|
|
136
Lib/fontTools/ttLib/tables/_h_e_a_d.py
Normal file
136
Lib/fontTools/ttLib/tables/_h_e_a_d.py
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
import time
|
||||||
|
import string
|
||||||
|
import calendar
|
||||||
|
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||||
|
|
||||||
|
|
||||||
|
headFormat = """
|
||||||
|
> # big endian
|
||||||
|
tableVersion: 16.16F
|
||||||
|
fontRevision: 16.16F
|
||||||
|
checkSumAdjustment: l
|
||||||
|
magicNumber: l
|
||||||
|
x # pad byte
|
||||||
|
flags: b
|
||||||
|
unitsPerEm: H
|
||||||
|
created: 8s
|
||||||
|
modified: 8s
|
||||||
|
xMin: h
|
||||||
|
yMin: h
|
||||||
|
xMax: h
|
||||||
|
yMax: h
|
||||||
|
macStyle: H
|
||||||
|
lowestRecPPEM: H
|
||||||
|
fontDirectionHint: h
|
||||||
|
indexToLocFormat: h
|
||||||
|
glyphDataFormat: h
|
||||||
|
"""
|
||||||
|
|
||||||
|
class table__h_e_a_d(DefaultTable.DefaultTable):
    """The 'head' (font header) table."""

    dependencies = ['maxp', 'loca']

    def decompile(self, data, ttFont):
        sstruct.unpack(headFormat, data, self)
        self.unitsPerEm = int(self.unitsPerEm)
        # convert the raw 8-byte date strings to long integers
        self.strings2dates()

    def compile(self, ttFont):
        # stamp the modification time (seconds since the 1904 Mac epoch)
        self.modified = long(time.time() - mac_epoch_diff)
        self.dates2strings()
        data = sstruct.pack(headFormat, self)
        # restore the in-memory long representation after packing
        self.strings2dates()
        return data

    def strings2dates(self):
        # wire format (8-byte big-endian string) -> long integer
        self.created = bin2long(self.created)
        self.modified = bin2long(self.modified)

    def dates2strings(self):
        # long integer -> wire format (8-byte big-endian string)
        self.created = long2bin(self.created)
        self.modified = long2bin(self.modified)

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in ("created", "modified"):
                # render dates human-readably, shifted to this machine's epoch
                value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
            if type(value) == type(0L):
                value=int(value)
            if name in ("magicNumber", "checkSumAdjustment"):
                value = hex(value)
            elif name == "flags":
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        value = attrs["value"]
        if name in ("created", "modified"):
            # parse the asctime() string back to a Mac-epoch long
            value = parse_date(value) - mac_epoch_diff
        elif name == "flags":
            value = binary2num(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)

    def __cmp__(self, other):
        """Comparison for round-trip testing; 0 means equal."""
        selfdict = self.__dict__.copy()
        otherdict = other.__dict__.copy()
        # for testing purposes, compare without the modified and checkSumAdjustment
        # fields, since they are allowed to be different.
        for key in ["modified", "checkSumAdjustment"]:
            del selfdict[key]
            del otherdict[key]
        return cmp(selfdict, otherdict)
|
||||||
|
|
||||||
|
|
||||||
|
def calc_mac_epoch_diff():
    """calculate the difference between the original Mac epoch (1904)
    to the epoch on this machine.
    """
    # 1971-01-01 is used as an anchor because it is safely representable
    # by mktime() on all platforms (avoids the 1970 boundary)
    safe_epoch_t = (1971, 1, 1, 0, 0, 0, 0, 0, 0)
    safe_epoch = time.mktime(safe_epoch_t) - time.timezone
    # sanity check: the anchor must round-trip through gmtime()
    assert time.gmtime(safe_epoch)[:6] == safe_epoch_t[:6]
    # 17 leap days occurred between 1904 and 1971
    seconds1904to1971 = 60 * 60 * 24 * (365 * (1971-1904) + 17)  # thanks, Laurence!
    return long(safe_epoch - seconds1904to1971)
|
||||||
|
|
||||||
|
# seconds between the 1904 Mac epoch and this machine's epoch
mac_epoch_diff = calc_mac_epoch_diff()


# lower-cased month and weekday abbreviations, matching time.asctime()
# output; used by parse_date() to map names back to indices
_months = map(string.lower, calendar.month_abbr)
_weekdays = map(string.lower, calendar.day_abbr)
|
||||||
|
|
||||||
|
def parse_date(datestring):
    """Parse a time.asctime()-style string ("Mon Jan  1 00:00:00 1971")
    back to seconds since this machine's epoch, as UTC."""
    datestring = string.lower(datestring)
    weekday, month, day, tim, year = string.split(datestring)
    weekday = _weekdays.index(weekday)
    month = _months.index(month)
    year = int(year)
    day = int(day)
    hour, minute, second = map(int, string.split(tim, ":"))
    t = (year, month, day, hour, minute, second, weekday, 0, 0)
    # mktime() works in local time; subtract the timezone offset to get UTC
    return long(time.mktime(t) - time.timezone)
|
||||||
|
|
||||||
|
|
||||||
|
def bin2long(data):
    """Convert a big-endian byte string to a (long) integer."""
    # thanks </F>!
    v = 0L
    for i in map(ord, data):
        v = v<<8 | i
    return v
|
||||||
|
|
||||||
|
def long2bin(v, bytes=8):
    """Convert integer *v* to a big-endian byte string of length *bytes*.

    The result is zero-padded on the left; *bytes* defaults to 8, the
    width of the 'created'/'modified' fields in the head table.
    """
    data = ""
    while v:
        data = chr(v & 0xff) + data
        v = v >> 8
    data = (bytes - len(data)) * "\0" + data
    # bug fix: the original asserted len(data) == 8 regardless of the
    # requested width, breaking every call with bytes != 8
    assert len(data) == bytes, "long too long"
    return data
|
||||||
|
|
78
Lib/fontTools/ttLib/tables/_h_h_e_a.py
Normal file
78
Lib/fontTools/ttLib/tables/_h_h_e_a.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
hheaFormat = """
|
||||||
|
> # big endian
|
||||||
|
tableVersion: 16.16F
|
||||||
|
ascent: h
|
||||||
|
descent: h
|
||||||
|
lineGap: h
|
||||||
|
advanceWidthMax: H
|
||||||
|
minLeftSideBearing: h
|
||||||
|
minRightSideBearing: h
|
||||||
|
xMaxExtent: h
|
||||||
|
caretSlopeRise: h
|
||||||
|
caretSlopeRun: h
|
||||||
|
reserved0: h
|
||||||
|
reserved1: h
|
||||||
|
reserved2: h
|
||||||
|
reserved3: h
|
||||||
|
reserved4: h
|
||||||
|
metricDataFormat: h
|
||||||
|
numberOfHMetrics: H
|
||||||
|
"""
|
||||||
|
|
||||||
|
class table__h_h_e_a(DefaultTable.DefaultTable):
    """The 'hhea' (horizontal header) table."""

    dependencies = ['hmtx', 'glyf']

    def decompile(self, data, ttFont):
        sstruct.unpack(hheaFormat, data, self)

    def compile(self, ttFont):
        # refresh the aggregate metrics before packing
        self.recalc(ttFont)
        return sstruct.pack(hheaFormat, self)

    def recalc(self, ttFont):
        """Recompute advanceWidthMax and the side-bearing/extent
        aggregates from 'hmtx' and 'glyf'."""
        hmtxTable = ttFont['hmtx']
        if ttFont.has_key('glyf'):
            if not ttFont.isLoaded('glyf'):
                # glyf not loaded: keep the existing values untouched
                return
            glyfTable = ttFont['glyf']
            advanceWidthMax = -100000    # arbitrary big negative number
            minLeftSideBearing = 100000  # arbitrary big number
            minRightSideBearing = 100000 # arbitrary big number
            xMaxExtent = -100000         # arbitrary big negative number

            for name in ttFont.getGlyphOrder():
                width, lsb = hmtxTable[name]
                g = glyfTable[name]
                if g.numberOfContours <= 0:
                    # empty and composite glyphs are skipped here
                    continue
                advanceWidthMax = max(advanceWidthMax, width)
                minLeftSideBearing = min(minLeftSideBearing, lsb)
                rsb = width - lsb - (g.xMax - g.xMin)
                minRightSideBearing = min(minRightSideBearing, rsb)
                extent = lsb + (g.xMax - g.xMin)
                xMaxExtent = max(xMaxExtent, extent)
            self.advanceWidthMax = advanceWidthMax
            self.minLeftSideBearing = minLeftSideBearing
            self.minRightSideBearing = minRightSideBearing
            self.xMaxExtent = xMaxExtent
        else:
            # XXX CFF recalc...
            pass

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(hheaFormat)
        for name in names:
            value = getattr(self, name)
            if type(value) == type(0L):
                value = int(value)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
|
94
Lib/fontTools/ttLib/tables/_h_m_t_x.py
Normal file
94
Lib/fontTools/ttLib/tables/_h_m_t_x.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import Numeric
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
|
||||||
|
class table__h_m_t_x(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
headerTag = 'hhea'
|
||||||
|
advanceName = 'width'
|
||||||
|
sideBearingName = 'lsb'
|
||||||
|
numberOfMetricsName = 'numberOfHMetrics'
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))
|
||||||
|
metrics = Numeric.fromstring(data[:4 * numberOfMetrics],
|
||||||
|
Numeric.Int16)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
metrics = metrics.byteswapped()
|
||||||
|
metrics.shape = (numberOfMetrics, 2)
|
||||||
|
data = data[4 * numberOfMetrics:]
|
||||||
|
numberOfSideBearings = ttFont['maxp'].numGlyphs - numberOfMetrics
|
||||||
|
numberOfSideBearings = int(numberOfSideBearings)
|
||||||
|
if numberOfSideBearings:
|
||||||
|
assert numberOfSideBearings > 0, "bad hmtx/vmtx table"
|
||||||
|
lastAdvance = metrics[-1][0]
|
||||||
|
advances = Numeric.array([lastAdvance] * numberOfSideBearings,
|
||||||
|
Numeric.Int16)
|
||||||
|
sideBearings = Numeric.fromstring(data[:2 * numberOfSideBearings],
|
||||||
|
Numeric.Int16)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
sideBearings = sideBearings.byteswapped()
|
||||||
|
data = data[2 * numberOfSideBearings:]
|
||||||
|
additionalMetrics = Numeric.array([advances, sideBearings],
|
||||||
|
Numeric.Int16)
|
||||||
|
metrics = Numeric.concatenate((metrics,
|
||||||
|
Numeric.transpose(additionalMetrics)))
|
||||||
|
if data:
|
||||||
|
raise ttLib.TTLibError, "too much data for hmtx/vmtx table"
|
||||||
|
metrics = metrics.tolist()
|
||||||
|
self.metrics = {}
|
||||||
|
for i in range(len(metrics)):
|
||||||
|
glyphName = ttFont.getGlyphName(i)
|
||||||
|
self.metrics[glyphName] = metrics[i]
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
metrics = []
|
||||||
|
for glyphName in ttFont.getGlyphOrder():
|
||||||
|
metrics.append(self.metrics[glyphName])
|
||||||
|
lastAdvance = metrics[-1][0]
|
||||||
|
lastIndex = len(metrics)
|
||||||
|
while metrics[lastIndex-2][0] == lastAdvance:
|
||||||
|
lastIndex = lastIndex - 1
|
||||||
|
additionalMetrics = metrics[lastIndex:]
|
||||||
|
additionalMetrics = map(lambda (advance, sb): sb, additionalMetrics)
|
||||||
|
metrics = metrics[:lastIndex]
|
||||||
|
setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics))
|
||||||
|
|
||||||
|
metrics = Numeric.array(metrics, Numeric.Int16)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
metrics = metrics.byteswapped()
|
||||||
|
data = metrics.tostring()
|
||||||
|
|
||||||
|
additionalMetrics = Numeric.array(additionalMetrics, Numeric.Int16)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
additionalMetrics = additionalMetrics.byteswapped()
|
||||||
|
data = data + additionalMetrics.tostring()
|
||||||
|
return data
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
    """Dump one <mtx> element per glyph, ordered by glyph name."""
    glyphNames = self.metrics.keys()
    glyphNames.sort()
    for glyphName in glyphNames:
        metric = self.metrics[glyphName]
        attributes = [
                ("name", glyphName),
                (self.advanceName, metric[0]),
                (self.sideBearingName, metric[1]),
        ]
        writer.simpletag("mtx", attributes)
        writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
    """Rebuild one glyph's metric from an <mtx> XML element."""
    if not hasattr(self, "metrics"):
        # first element seen: start with an empty metrics dict
        self.metrics = {}
    if name == "mtx":
        self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]),
                safeEval(attrs[self.sideBearingName])]
|
||||||
|
|
||||||
|
def __getitem__(self, glyphName):
    # Return the [advance, sideBearing] pair for 'glyphName'.
    return self.metrics[glyphName]
|
||||||
|
|
||||||
|
def __setitem__(self, glyphName, (advance, sb)):
    # Store the metric; the value must be an (advance, sideBearing) pair.
    self.metrics[glyphName] = advance, sb
|
||||||
|
|
186
Lib/fontTools/ttLib/tables/_k_e_r_n.py
Normal file
186
Lib/fontTools/ttLib/tables/_k_e_r_n.py
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import struct
|
||||||
|
import ttLib.sfnt
|
||||||
|
from fontTools.misc.textTools import safeEval, readHex
|
||||||
|
|
||||||
|
|
||||||
|
class table__k_e_r_n(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
def getkern(self, format):
|
||||||
|
for subtable in self.kernTables:
|
||||||
|
if subtable.version == format:
|
||||||
|
return subtable
|
||||||
|
return None # not found
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
version, nTables = struct.unpack(">HH", data[:4])
|
||||||
|
if version == 1:
|
||||||
|
# Apple's new format. Hm.
|
||||||
|
version, nTables = struct.unpack(">ll", data[:8])
|
||||||
|
self.version = version / float(0x10000)
|
||||||
|
data = data[8:]
|
||||||
|
else:
|
||||||
|
self.version = version
|
||||||
|
data = data[4:]
|
||||||
|
tablesIndex = []
|
||||||
|
self.kernTables = []
|
||||||
|
for i in range(nTables):
|
||||||
|
version, length = struct.unpack(">HH", data[:4])
|
||||||
|
length = int(length)
|
||||||
|
if not kern_classes.has_key(version):
|
||||||
|
subtable = KernTable_format_unkown()
|
||||||
|
else:
|
||||||
|
subtable = kern_classes[version]()
|
||||||
|
subtable.decompile(data[:length], ttFont)
|
||||||
|
self.kernTables.append(subtable)
|
||||||
|
data = data[length:]
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
nTables = len(self.kernTables)
|
||||||
|
if self.version == 1.0:
|
||||||
|
# Apple's new format.
|
||||||
|
data = struct.pack(">ll", self.version * 0x1000, nTables)
|
||||||
|
else:
|
||||||
|
data = struct.pack(">HH", self.version, nTables)
|
||||||
|
for subtable in self.kernTables:
|
||||||
|
data = data + subtable.compile(ttFont)
|
||||||
|
return data
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
writer.simpletag("version", value=self.version)
|
||||||
|
writer.newline()
|
||||||
|
for subtable in self.kernTables:
|
||||||
|
subtable.toXML(writer, ttFont)
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
if name == "version":
|
||||||
|
self.version = safeEval(attrs["value"])
|
||||||
|
return
|
||||||
|
if name <> "kernsubtable":
|
||||||
|
return
|
||||||
|
if not hasattr(self, "kernTables"):
|
||||||
|
self.kernTables = []
|
||||||
|
format = safeEval(attrs["format"])
|
||||||
|
if not kern_classes.has_key(format):
|
||||||
|
subtable = KernTable_format_unkown()
|
||||||
|
else:
|
||||||
|
subtable = kern_classes[format]()
|
||||||
|
self.kernTables.append(subtable)
|
||||||
|
subtable.fromXML((name, attrs, content), ttFont)
|
||||||
|
|
||||||
|
|
||||||
|
class KernTable_format_0:
    """Format 0 'kern' subtable: a dict of (leftName, rightName) -> kern value."""

    def decompile(self, data, ttFont):
        # subtable header: version, total byte length, coverage flags
        version, length, coverage = struct.unpack(">HHH", data[:6])
        self.version, self.coverage = int(version), int(coverage)
        data = data[6:]

        self.kernTable = kernTable = {}

        # binary-search helper fields; recomputed on compile, so ignored here
        nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8])
        data = data[8:]

        for k in range(nPairs):
            left, right, value = struct.unpack(">HHh", data[:6])
            data = data[6:]
            left, right = int(left), int(right)
            # keys are glyph *names*, not numeric glyph IDs
            kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value
        assert len(data) == 0

    def compile(self, ttFont):
        nPairs = len(self.kernTable)
        # binary-search header fields, per the sfnt spec formulas
        entrySelector = ttLib.sfnt.maxpoweroftwo(nPairs)
        searchRange = (2 ** entrySelector) * 6
        rangeShift = (nPairs - (2 ** entrySelector)) * 6
        data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)

        # yeehee! (I mean, turn names into indices)
        kernTable = map(lambda ((left, right), value), getGlyphID=ttFont.getGlyphID:
                (getGlyphID(left), getGlyphID(right), value),
                self.kernTable.items())
        # pairs must be sorted by glyph IDs for the binary search to work
        kernTable.sort()
        for left, right, value in kernTable:
            data = data + struct.pack(">HHh", left, right, value)
        # prepend the subtable header; 'length' includes its own 6 bytes
        return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data

    def toXML(self, writer, ttFont):
        writer.begintag("kernsubtable", coverage=self.coverage, format=0)
        writer.newline()
        items = self.kernTable.items()
        items.sort()
        for (left, right), value in items:
            writer.simpletag("pair", [
                    ("l", left),
                    ("r", right),
                    ("v", value)
                    ])
            writer.newline()
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        self.coverage = safeEval(attrs["coverage"])
        self.version = safeEval(attrs["format"])
        if not hasattr(self, "kernTable"):
            self.kernTable = {}
        for element in content:
            if type(element) <> type(()):
                # skip whitespace/text nodes between child elements
                continue
            name, attrs, content = element
            self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])

    def __getitem__(self, pair):
        # pair is a (leftGlyphName, rightGlyphName) tuple
        return self.kernTable[pair]

    def __setitem__(self, pair, value):
        self.kernTable[pair] = value

    def __delitem__(self, pair):
        del self.kernTable[pair]

    def __cmp__(self, other):
        # Py2-style comparison: equal iff all attributes compare equal.
        return cmp(self.__dict__, other.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
class KernTable_format_2:
    """Stub for 'kern' subtable format 2: the data is kept as an opaque blob."""

    def decompile(self, data, ttFont):
        # not parsed; stored verbatim for round-tripping
        self.data = data

    def compile(self, ttFont):
        # Bug fix: the signature was 'compile(self, ttFont, ttFont)' (a
        # duplicate-argument syntax error) and it returned an undefined
        # name; return the raw bytes stored by decompile().
        return self.data

    def toXML(self, writer, ttFont):
        # Bug fix: added the 'ttFont' parameter so the dispatch in
        # table__k_e_r_n.toXML (subtable.toXML(writer, ttFont)) works.
        writer.begintag("kernsubtable", format=2)
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name_attrs_content, ttFont):
        # accepts the usual (name, attrs, content) tuple
        (name, attrs, content) = name_attrs_content
        # Bug fix: decompile() requires a ttFont argument.
        self.decompile(readHex(content), ttFont)
|
||||||
|
|
||||||
|
|
||||||
|
class KernTable_format_unkown:
    """Fallback for unrecognized 'kern' subtable formats: opaque blob.

    (Name spelling kept as-is; it is referenced elsewhere in this module.)
    """

    def decompile(self, data, ttFont):
        # not parsed; stored verbatim for round-tripping
        self.data = data

    def compile(self, ttFont):
        # Bug fix: returned an undefined local 'data'; return the stored blob.
        return self.data

    def toXML(self, writer, ttFont):
        writer.begintag("kernsubtable", format="-1")
        writer.newline()
        writer.comment("unknown 'kern' subtable format")
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name_attrs_content, ttFont):
        # accepts the usual (name, attrs, content) tuple
        (name, attrs, content) = name_attrs_content
        # Bug fix: decompile() requires a ttFont argument.
        self.decompile(readHex(content), ttFont)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Map 'kern' subtable format number to its parser class.
# Bug fix: KernTable_format_2 was registered under key 1; subtable
# version fields are 0 or 2, so format-2 subtables always fell through
# to KernTable_format_unkown.
kern_classes = {0: KernTable_format_0, 2: KernTable_format_2}
|
55
Lib/fontTools/ttLib/tables/_l_o_c_a.py
Normal file
55
Lib/fontTools/ttLib/tables/_l_o_c_a.py
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
import Numeric
|
||||||
|
from fontTools import ttLib
|
||||||
|
import struct
|
||||||
|
|
||||||
|
class table__l_o_c_a(DefaultTable.DefaultTable):
    """The 'loca' table: per-glyph byte offsets into the 'glyf' table."""

    dependencies = ['glyf']

    def decompile(self, data, ttFont):
        # head.indexToLocFormat: 1 -> 32-bit offsets, 0 -> 16-bit half-offsets
        longFormat = ttFont['head'].indexToLocFormat
        if longFormat:
            format = "l"
        else:
            format = "H"
        locations = array.array(format)
        locations.fromstring(data)
        if ttLib.endian <> "big":
            # file data is big-endian; array.array reads native-endian
            locations.byteswap()
        locations = Numeric.array(locations, Numeric.Int32)
        if not longFormat:
            # the short format stores offset/2
            locations = locations * 2
        # there is one extra entry marking the end of the last glyph
        if len(locations) <> (ttFont['maxp'].numGlyphs + 1):
            raise ttLib.TTLibError, "corrupt 'loca' table"
        self.locations = locations

    def compile(self, ttFont):
        locations = self.locations
        if max(locations) < 0x20000:
            # every offset/2 fits in an unsigned 16-bit value: short format
            locations = locations / 2
            locations = locations.astype(Numeric.Int16)
            ttFont['head'].indexToLocFormat = 0
        else:
            ttFont['head'].indexToLocFormat = 1
        if ttLib.endian <> "big":
            locations = locations.byteswapped()
        return locations.tostring()

    def set(self, locations):
        # Install a plain sequence of offsets, stored as a Numeric array.
        self.locations = Numeric.array(locations, Numeric.Int32)

    def toXML(self, writer, ttFont):
        writer.comment("The 'loca' table will be calculated by the compiler")
        writer.newline()

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __cmp__(self, other):
        # equal iff same length and element-wise equal offsets
        return cmp(len(self), len(other)) or not Numeric.alltrue(Numeric.equal(self.locations, other.locations))
|
||||||
|
|
138
Lib/fontTools/ttLib/tables/_m_a_x_p.py
Normal file
138
Lib/fontTools/ttLib/tables/_m_a_x_p.py
Normal file
@ -0,0 +1,138 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
maxpFormat_0_5 = """
|
||||||
|
> # big endian
|
||||||
|
tableVersion: i
|
||||||
|
numGlyphs: H
|
||||||
|
"""
|
||||||
|
|
||||||
|
maxpFormat_1_0_add = """
|
||||||
|
> # big endian
|
||||||
|
maxPoints: H
|
||||||
|
maxContours: H
|
||||||
|
maxCompositePoints: H
|
||||||
|
maxCompositeContours: H
|
||||||
|
maxZones: H
|
||||||
|
maxTwilightPoints: H
|
||||||
|
maxStorage: H
|
||||||
|
maxFunctionDefs: H
|
||||||
|
maxInstructionDefs: H
|
||||||
|
maxStackElements: H
|
||||||
|
maxSizeOfInstructions: H
|
||||||
|
maxComponentElements: H
|
||||||
|
maxComponentDepth: H
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class table__m_a_x_p(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
dependencies = ['glyf']
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
|
||||||
|
self.numGlyphs = int(self.numGlyphs)
|
||||||
|
if self.tableVersion == 0x00010000:
|
||||||
|
dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
|
||||||
|
else:
|
||||||
|
assert self.tableVersion == 0x00005000, "unknown 'maxp' format: %x" % self.tableVersion
|
||||||
|
assert len(data) == 0
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
if ttFont.has_key('glyf'):
|
||||||
|
if ttFont.isLoaded('glyf'):
|
||||||
|
self.recalc(ttFont)
|
||||||
|
else:
|
||||||
|
pass # XXX CFF!!!
|
||||||
|
data = sstruct.pack(maxpFormat_0_5, self)
|
||||||
|
if self.tableVersion == 0x00010000:
|
||||||
|
data = data + sstruct.pack(maxpFormat_1_0_add, self)
|
||||||
|
else:
|
||||||
|
assert self.tableVersion == 0x00005000, "unknown 'maxp' format: %f" % self.tableVersion
|
||||||
|
return data
|
||||||
|
|
||||||
|
def recalc(self, ttFont):
|
||||||
|
"""Recalculate the font bounding box, and most other maxp values except
|
||||||
|
for the TT instructions values. Also recalculate the value of bit 1
|
||||||
|
of the flags field of the 'head' table.
|
||||||
|
"""
|
||||||
|
glyfTable = ttFont['glyf']
|
||||||
|
hmtxTable = ttFont['hmtx']
|
||||||
|
headTable = ttFont['head']
|
||||||
|
self.numGlyphs = len(glyfTable)
|
||||||
|
xMin = 100000
|
||||||
|
yMin = 100000
|
||||||
|
xMax = -100000
|
||||||
|
yMax = -100000
|
||||||
|
maxPoints = 0
|
||||||
|
maxContours = 0
|
||||||
|
maxCompositePoints = 0
|
||||||
|
maxCompositeContours = 0
|
||||||
|
maxComponentElements = 0
|
||||||
|
maxComponentDepth = 0
|
||||||
|
allXMaxIsLsb = 1
|
||||||
|
for glyphName in ttFont.getGlyphOrder():
|
||||||
|
g = glyfTable[glyphName]
|
||||||
|
if g.numberOfContours:
|
||||||
|
if hmtxTable[glyphName][1] <> g.xMin:
|
||||||
|
allXMaxIsLsb = 0
|
||||||
|
xMin = min(xMin, g.xMin)
|
||||||
|
yMin = min(yMin, g.yMin)
|
||||||
|
xMax = max(xMax, g.xMax)
|
||||||
|
yMax = max(yMax, g.yMax)
|
||||||
|
if g.numberOfContours > 0:
|
||||||
|
nPoints, nContours = g.getMaxpValues()
|
||||||
|
maxPoints = max(maxPoints, nPoints)
|
||||||
|
maxContours = max(maxContours, nContours)
|
||||||
|
else:
|
||||||
|
nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
|
||||||
|
maxCompositePoints = max(maxCompositePoints, nPoints)
|
||||||
|
maxCompositeContours = max(maxCompositeContours, nContours)
|
||||||
|
maxComponentElements = max(maxComponentElements, len(g.components))
|
||||||
|
maxComponentDepth = max(maxComponentDepth, componentDepth)
|
||||||
|
self.xMin = xMin
|
||||||
|
self.yMin = yMin
|
||||||
|
self.xMax = xMax
|
||||||
|
self.yMax = yMax
|
||||||
|
self.maxPoints = maxPoints
|
||||||
|
self.maxContours = maxContours
|
||||||
|
self.maxCompositePoints = maxCompositePoints
|
||||||
|
self.maxCompositeContours = maxCompositeContours
|
||||||
|
self.maxComponentDepth = maxComponentDepth
|
||||||
|
if allXMaxIsLsb:
|
||||||
|
headTable.flags = headTable.flags | 0x2
|
||||||
|
else:
|
||||||
|
headTable.flags = headTable.flags & ~0x2
|
||||||
|
|
||||||
|
def testrepr(self):
|
||||||
|
items = self.__dict__.items()
|
||||||
|
items.sort()
|
||||||
|
print ". . . . . . . . ."
|
||||||
|
for combo in items:
|
||||||
|
print " %s: %s" % combo
|
||||||
|
print ". . . . . . . . ."
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
if self.tableVersion <> 0x00005000:
|
||||||
|
writer.comment("Most of this table will be recalculated by the compiler")
|
||||||
|
writer.newline()
|
||||||
|
formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
|
||||||
|
if self.tableVersion == 0x00010000:
|
||||||
|
formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
|
||||||
|
names = names + names_1_0
|
||||||
|
else:
|
||||||
|
assert self.tableVersion == 0x00005000, "unknown 'maxp' format: %f" % self.tableVersion
|
||||||
|
for name in names:
|
||||||
|
value = getattr(self, name)
|
||||||
|
if type(value) == type(0L):
|
||||||
|
value=int(value)
|
||||||
|
if name == "tableVersion":
|
||||||
|
value = hex(value)
|
||||||
|
writer.simpletag(name, value=value)
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
|
||||||
|
|
136
Lib/fontTools/ttLib/tables/_n_a_m_e.py
Normal file
136
Lib/fontTools/ttLib/tables/_n_a_m_e.py
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import struct, sstruct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
import string
|
||||||
|
import types
|
||||||
|
|
||||||
|
nameRecordFormat = """
|
||||||
|
> # big endian
|
||||||
|
platformID: H
|
||||||
|
platEncID: H
|
||||||
|
langID: H
|
||||||
|
nameID: H
|
||||||
|
length: H
|
||||||
|
offset: H
|
||||||
|
"""
|
||||||
|
|
||||||
|
class table__n_a_m_e(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
format, n, stringoffset = struct.unpack(">HHH", data[:6])
|
||||||
|
stringoffset = int(stringoffset)
|
||||||
|
stringData = data[stringoffset:]
|
||||||
|
data = data[6:stringoffset]
|
||||||
|
self.names = []
|
||||||
|
for i in range(n):
|
||||||
|
name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
|
||||||
|
name.fixlongs()
|
||||||
|
name.string = stringData[name.offset:name.offset+name.length]
|
||||||
|
del name.offset, name.length
|
||||||
|
self.names.append(name)
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
self.names.sort() # sort according to the spec; see NameRecord.__cmp__()
|
||||||
|
stringData = ""
|
||||||
|
format = 0
|
||||||
|
n = len(self.names)
|
||||||
|
stringoffset = 6 + n * sstruct.calcsize(nameRecordFormat)
|
||||||
|
data = struct.pack(">HHH", format, n, stringoffset)
|
||||||
|
lastoffset = 0
|
||||||
|
done = {} # remember the data so we can reuse the "pointers"
|
||||||
|
for name in self.names:
|
||||||
|
if done.has_key(name.string):
|
||||||
|
name.offset, name.length = done[name.string]
|
||||||
|
else:
|
||||||
|
name.offset, name.length = done[name.string] = len(stringData), len(name.string)
|
||||||
|
stringData = stringData + name.string
|
||||||
|
data = data + sstruct.pack(nameRecordFormat, name)
|
||||||
|
return data + stringData
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
for name in self.names:
|
||||||
|
name.toXML(writer, ttFont)
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
if name <> "namerecord":
|
||||||
|
return # ignore unknown tags
|
||||||
|
if not hasattr(self, "names"):
|
||||||
|
self.names = []
|
||||||
|
name = NameRecord()
|
||||||
|
self.names.append(name)
|
||||||
|
name.fromXML((name, attrs, content), ttFont)
|
||||||
|
|
||||||
|
def getname(self, nameID, platformID, platEncID, langID=None):
|
||||||
|
for namerecord in self.names:
|
||||||
|
if ( namerecord.nameID == nameID and
|
||||||
|
namerecord.platformID == platformID and
|
||||||
|
namerecord.platEncID == platEncID):
|
||||||
|
if langID is None or namerecord.langID == langID:
|
||||||
|
return namerecord
|
||||||
|
return None # not found
|
||||||
|
|
||||||
|
def __cmp__(self, other):
|
||||||
|
return cmp(self.names, other.names)
|
||||||
|
|
||||||
|
|
||||||
|
class NameRecord:
    """One entry of the 'name' table: IDs plus the (possibly UTF-16) string."""

    def toXML(self, writer, ttFont):
        writer.begintag("namerecord", [
                ("nameID", self.nameID),
                ("platformID", self.platformID),
                ("platEncID", self.platEncID),
                ("langID", hex(self.langID)),
                ])
        writer.newline()
        # platform 0 (Unicode) and 3/1 (Windows Unicode) strings are 16-bit
        if self.platformID == 0 or (self.platformID == 3 and self.platEncID == 1):
            writer.write16bit(self.string)
        else:
            writer.write8bit(self.string)
        writer.newline()
        writer.endtag("namerecord")
        writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        self.nameID = safeEval(attrs["nameID"])
        self.platformID = safeEval(attrs["platformID"])
        self.platEncID = safeEval(attrs["platEncID"])
        self.langID = safeEval(attrs["langID"])
        if self.platformID == 0 or (self.platformID == 3 and self.platEncID == 1):
            # Unicode platforms: reassemble the 16-bit string
            from fontTools.ttLib.xmlImport import UnicodeString
            str = UnicodeString("")
            for element in content:
                str = str + element
            self.string = str.stripped().tostring()
        else:
            self.string = string.strip(string.join(content, ""))

    def __cmp__(self, other):
        """Compare method, so a list of NameRecords can be sorted
        according to the spec by just sorting it..."""
        selftuple = (self.platformID,
                self.platEncID,
                self.langID,
                self.nameID,
                self.string)
        othertuple = (other.platformID,
                other.platEncID,
                other.langID,
                other.nameID,
                other.string)
        return cmp(selftuple, othertuple)

    def __repr__(self):
        return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
                self.nameID, self.platformID, self.langID)

    def fixlongs(self):
        """correct effects from bug in Python 1.5.1, where "H"
        returns a Python Long int.
        This has been fixed in Python 1.5.2.
        """
        for attr in dir(self):
            val = getattr(self, attr)
            if type(val) == types.LongType:
                setattr(self, attr, int(val))
|
||||||
|
|
214
Lib/fontTools/ttLib/tables/_p_o_s_t.py
Normal file
214
Lib/fontTools/ttLib/tables/_p_o_s_t.py
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
|
||||||
|
import DefaultTable
|
||||||
|
import struct, sstruct
|
||||||
|
import array
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval, readHex
|
||||||
|
|
||||||
|
|
||||||
|
postFormat = """
|
||||||
|
>
|
||||||
|
formatType: 16.16F
|
||||||
|
italicAngle: 16.16F # italic angle in degrees
|
||||||
|
underlinePosition: h
|
||||||
|
underlineThickness: h
|
||||||
|
isFixedPitch: l
|
||||||
|
minMemType42: l # minimum memory if TrueType font is downloaded
|
||||||
|
maxMemType42: l # maximum memory if TrueType font is downloaded
|
||||||
|
minMemType1: l # minimum memory if Type1 font is downloaded
|
||||||
|
maxMemType1: l # maximum memory if Type1 font is downloaded
|
||||||
|
"""
|
||||||
|
|
||||||
|
postFormatSize = sstruct.calcsize(postFormat)
|
||||||
|
|
||||||
|
|
||||||
|
class table__p_o_s_t(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
sstruct.unpack(postFormat, data[:postFormatSize], self)
|
||||||
|
data = data[postFormatSize:]
|
||||||
|
if self.formatType == 1.0:
|
||||||
|
self.decode_format_1_0(data, ttFont)
|
||||||
|
elif self.formatType == 2.0:
|
||||||
|
self.decode_format_2_0(data, ttFont)
|
||||||
|
elif self.formatType == 3.0:
|
||||||
|
self.decode_format_3_0(data, ttFont)
|
||||||
|
else:
|
||||||
|
# supported format
|
||||||
|
raise ttLib.TTLibError, "'post' table format %f not supported" % self.formatType
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
data = sstruct.pack(postFormat, self)
|
||||||
|
if self.formatType == 1.0:
|
||||||
|
pass # we're done
|
||||||
|
elif self.formatType == 2.0:
|
||||||
|
data = data + self.encode_format_2_0(ttFont)
|
||||||
|
elif self.formatType == 3.0:
|
||||||
|
pass # we're done
|
||||||
|
else:
|
||||||
|
# supported format
|
||||||
|
raise ttLib.TTLibError, "'post' table format %f not supported" % self.formatType
|
||||||
|
return data
|
||||||
|
|
||||||
|
def getGlyphOrder(self):
|
||||||
|
"""This function will get called by a ttLib.TTFont instance.
|
||||||
|
Do not call this function yourself, use TTFont().getGlyphOrder()
|
||||||
|
or its relatives instead!
|
||||||
|
"""
|
||||||
|
if not hasattr(self, "glyphOrder"):
|
||||||
|
raise ttLib.TTLibError, "illegal use of getGlyphOrder()"
|
||||||
|
glyphOrder = self.glyphOrder
|
||||||
|
del self.glyphOrder
|
||||||
|
return glyphOrder
|
||||||
|
|
||||||
|
def decode_format_1_0(self, data, ttFont):
|
||||||
|
self.glyphOrder = standardGlyphOrder[:]
|
||||||
|
|
||||||
|
def decode_format_2_0(self, data, ttFont):
|
||||||
|
numGlyphs, = struct.unpack(">H", data[:2])
|
||||||
|
numGlyphs = int(numGlyphs)
|
||||||
|
data = data[2:]
|
||||||
|
indices = array.array("H")
|
||||||
|
indices.fromstring(data[:2*numGlyphs])
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
indices.byteswap()
|
||||||
|
data = data[2*numGlyphs:]
|
||||||
|
self.extraNames = extraNames = unpackPStrings(data)
|
||||||
|
self.glyphOrder = glyphOrder = [None] * int(ttFont['maxp'].numGlyphs)
|
||||||
|
for glyphID in range(numGlyphs):
|
||||||
|
index = indices[glyphID]
|
||||||
|
if index > 257:
|
||||||
|
name = extraNames[index-258]
|
||||||
|
else:
|
||||||
|
# fetch names from standard list
|
||||||
|
name = standardGlyphOrder[index]
|
||||||
|
glyphOrder[glyphID] = name
|
||||||
|
#AL990511: code added to handle the case of new glyphs without
|
||||||
|
# entries into the 'post' table
|
||||||
|
if numGlyphs < ttFont['maxp'].numGlyphs:
|
||||||
|
for i in range(numGlyphs, ttFont['maxp'].numGlyphs):
|
||||||
|
glyphOrder[i] = "glyph#%.5d" % i
|
||||||
|
self.extraNames.append(glyphOrder[i])
|
||||||
|
self.build_psNameMapping(ttFont)
|
||||||
|
|
||||||
|
def build_psNameMapping(self, ttFont):
|
||||||
|
mapping = {}
|
||||||
|
allNames = {}
|
||||||
|
for i in range(ttFont['maxp'].numGlyphs):
|
||||||
|
glyphName = psName = self.glyphOrder[i]
|
||||||
|
if allNames.has_key(glyphName):
|
||||||
|
# make up a new glyphName that's unique
|
||||||
|
n = 1
|
||||||
|
while allNames.has_key(glyphName + "#" + `n`):
|
||||||
|
n = n + 1
|
||||||
|
glyphName = glyphName + "#" + `n`
|
||||||
|
self.glyphOrder[i] = glyphName
|
||||||
|
mapping[glyphName] = psName
|
||||||
|
allNames[glyphName] = psName
|
||||||
|
self.mapping = mapping
|
||||||
|
|
||||||
|
def decode_format_3_0(self, data, ttFont):
|
||||||
|
# Setting self.glyphOrder to None will cause the TTFont object
|
||||||
|
# try and construct glyph names from a Unicode cmap table.
|
||||||
|
self.glyphOrder = None
|
||||||
|
|
||||||
|
def encode_format_2_0(self, ttFont):
|
||||||
|
numGlyphs = ttFont['maxp'].numGlyphs
|
||||||
|
glyphOrder = ttFont.getGlyphOrder()
|
||||||
|
assert len(glyphOrder) == numGlyphs
|
||||||
|
indices = array.array("H")
|
||||||
|
for glyphID in range(numGlyphs):
|
||||||
|
glyphName = glyphOrder[glyphID]
|
||||||
|
if self.mapping.has_key(glyphName):
|
||||||
|
psName = self.mapping[glyphName]
|
||||||
|
else:
|
||||||
|
psName = glyphName
|
||||||
|
if psName in self.extraNames:
|
||||||
|
index = 258 + self.extraNames.index(psName)
|
||||||
|
elif psName in standardGlyphOrder:
|
||||||
|
index = standardGlyphOrder.index(psName)
|
||||||
|
else:
|
||||||
|
index = 258 + len(self.extraNames)
|
||||||
|
extraNames.append(psName)
|
||||||
|
indices.append(index)
|
||||||
|
if ttLib.endian <> "big":
|
||||||
|
indices.byteswap()
|
||||||
|
return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(self.extraNames)
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
formatstring, names, fixes = sstruct.getformat(postFormat)
|
||||||
|
for name in names:
|
||||||
|
value = getattr(self, name)
|
||||||
|
writer.simpletag(name, value=value)
|
||||||
|
writer.newline()
|
||||||
|
if hasattr(self, "mapping"):
|
||||||
|
writer.begintag("psNames")
|
||||||
|
writer.newline()
|
||||||
|
writer.comment("This file uses unique glyph names based on the information\n"
|
||||||
|
"found in the 'post' table. Since these names might not be unique,\n"
|
||||||
|
"we have to invent artificial names in case of clashes. In order to\n"
|
||||||
|
"be able to retain the original information, we need a name to\n"
|
||||||
|
"ps name mapping for those cases where they differ. That's what\n"
|
||||||
|
"you see below.\n")
|
||||||
|
writer.newline()
|
||||||
|
items = self.mapping.items()
|
||||||
|
items.sort()
|
||||||
|
for name, psName in items:
|
||||||
|
writer.simpletag("psName", name=name, psName=psName)
|
||||||
|
writer.newline()
|
||||||
|
writer.endtag("psNames")
|
||||||
|
writer.newline()
|
||||||
|
if hasattr(self, "extraNames"):
|
||||||
|
writer.begintag("extraNames")
|
||||||
|
writer.newline()
|
||||||
|
writer.comment("following are the name that are not taken from the standard Mac glyph order")
|
||||||
|
writer.newline()
|
||||||
|
for name in self.extraNames:
|
||||||
|
writer.simpletag("psName", name=name)
|
||||||
|
writer.newline()
|
||||||
|
writer.endtag("extraNames")
|
||||||
|
writer.newline()
|
||||||
|
if hasattr(self, "data"):
|
||||||
|
writer.begintag("hexdata")
|
||||||
|
writer.newline()
|
||||||
|
writer.dumphex(self.data)
|
||||||
|
writer.endtag("hexdata")
|
||||||
|
writer.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
if name not in ("psNames", "extraNames", "hexdata"):
|
||||||
|
setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
elif name == "psNames":
|
||||||
|
self.mapping = {}
|
||||||
|
for element in content:
|
||||||
|
if type(element) <> type(()):
|
||||||
|
continue
|
||||||
|
name, attrs, content = element
|
||||||
|
if name == "psName":
|
||||||
|
self.mapping[attrs["name"]] = attrs["psName"]
|
||||||
|
elif name == "extraNames":
|
||||||
|
self.extraNames = []
|
||||||
|
for element in content:
|
||||||
|
if type(element) <> type(()):
|
||||||
|
continue
|
||||||
|
name, attrs, content = element
|
||||||
|
if name == "psName":
|
||||||
|
self.extraNames.append(attrs["name"])
|
||||||
|
else:
|
||||||
|
self.data = readHex(content)
|
||||||
|
|
||||||
|
|
||||||
|
def unpackPStrings(data):
    """Split a run of Pascal strings (length byte + bytes) into a list."""
    strings = []
    pos = 0
    end = len(data)
    while pos < end:
        count = ord(data[pos])
        strings.append(data[pos + 1:pos + 1 + count])
        pos = pos + 1 + count
    return strings
|
||||||
|
|
||||||
|
def packPStrings(strings):
    """Join strings into Pascal-string form: each prefixed by its length byte."""
    chunks = []
    for s in strings:
        chunks.append(chr(len(s)) + s)
    return "".join(chunks)
|
||||||
|
|
14
Lib/fontTools/ttLib/tables/_p_r_e_p.py
Normal file
14
Lib/fontTools/ttLib/tables/_p_r_e_p.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import array
|
||||||
|
|
||||||
|
class table__p_r_e_p(DefaultTable.DefaultTable):
    """The 'prep' table: raw TrueType pre-program instructions, kept opaque."""

    def decompile(self, data, ttFont):
        # stored verbatim; the instruction stream is not parsed
        self.prep = data

    def compile(self, ttFont):
        return self.prep

    def __len__(self):
        return len(self.prep)
|
||||||
|
|
78
Lib/fontTools/ttLib/tables/_v_h_e_a.py
Normal file
78
Lib/fontTools/ttLib/tables/_v_h_e_a.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import sstruct
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
vheaFormat = """
|
||||||
|
> # big endian
|
||||||
|
tableVersion: 16.16F
|
||||||
|
ascent: h
|
||||||
|
descent: h
|
||||||
|
lineGap: h
|
||||||
|
advanceHeightMax: H
|
||||||
|
minTopSideBearing: h
|
||||||
|
minBottomSideBearing: h
|
||||||
|
yMaxExtent: h
|
||||||
|
caretSlopeRise: h
|
||||||
|
caretSlopeRun: h
|
||||||
|
reserved0: h
|
||||||
|
reserved1: h
|
||||||
|
reserved2: h
|
||||||
|
reserved3: h
|
||||||
|
reserved4: h
|
||||||
|
metricDataFormat: h
|
||||||
|
numberOfVMetrics: H
|
||||||
|
"""
|
||||||
|
|
||||||
|
class table__v_h_e_a(DefaultTable.DefaultTable):
    """The 'vhea' table: vertical header, the vertical analogue of 'hhea'."""

    dependencies = ['vmtx', 'glyf']

    def decompile(self, data, ttFont):
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        # the aggregate fields are recalculated on every compile
        self.recalc(ttFont)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        # Recompute the aggregate vertical metrics from 'vmtx' and 'glyf'.
        vtmxTable = ttFont['vmtx']
        if ttFont.has_key('glyf'):
            if not ttFont.isLoaded('glyf'):
                # glyph data not in memory: keep the decompiled values
                return
            glyfTable = ttFont['glyf']
            advanceHeightMax = -100000    # arbitrary big negative number
            minTopSideBearing = 100000    # arbitrary big number
            minBottomSideBearing = 100000 # arbitrary big number
            yMaxExtent = -100000          # arbitrary big negative number

            for name in ttFont.getGlyphOrder():
                height, tsb = vtmxTable[name]
                g = glyfTable[name]
                if g.numberOfContours <= 0:
                    # empty and composite glyphs are skipped here
                    continue
                advanceHeightMax = max(advanceHeightMax, height)
                minTopSideBearing = min(minTopSideBearing, tsb)
                # 'rsb' is really the bottom side bearing
                rsb = height - tsb - (g.yMax - g.yMin)
                minBottomSideBearing = min(minBottomSideBearing, rsb)
                extent = tsb + (g.yMax - g.yMin)
                yMaxExtent = max(yMaxExtent, extent)
            self.advanceHeightMax = advanceHeightMax
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent
        else:
            # XXX CFF recalc...
            pass

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if type(value) == type(0L):
                value = int(value)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        # each simple tag maps directly onto an attribute
        setattr(self, name, safeEval(attrs["value"]))
|
||||||
|
|
14
Lib/fontTools/ttLib/tables/_v_m_t_x.py
Normal file
14
Lib/fontTools/ttLib/tables/_v_m_t_x.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
import DefaultTable
|
||||||
|
import Numeric
|
||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
|
||||||
|
superclass = ttLib.getTableClass("hmtx")
|
||||||
|
|
||||||
|
class table__v_m_t_x(superclass):
|
||||||
|
|
||||||
|
headerTag = 'vhea'
|
||||||
|
advanceName = 'height'
|
||||||
|
sideBearingName = 'tsb'
|
||||||
|
numberOfMetricsName = 'numberOfVMetrics'
|
||||||
|
|
605
Lib/fontTools/ttLib/tables/otCommon.py
Normal file
605
Lib/fontTools/ttLib/tables/otCommon.py
Normal file
@ -0,0 +1,605 @@
|
|||||||
|
"""ttLib.tables.otCommon.py -- Various data structures used by various OpenType tables.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import struct, sstruct
|
||||||
|
import DefaultTable
|
||||||
|
from fontTools import ttLib
|
||||||
|
|
||||||
|
|
||||||
|
class base_GPOS_GSUB(DefaultTable.DefaultTable):
|
||||||
|
|
||||||
|
"""Base class for GPOS and GSUB tables; they share the same high-level structure."""
|
||||||
|
|
||||||
|
version = 0x00010000
|
||||||
|
|
||||||
|
def decompile(self, data, otFont):
|
||||||
|
reader = OTTableReader(data)
|
||||||
|
self.version = reader.readLong()
|
||||||
|
if self.version <> 0x00010000:
|
||||||
|
raise ttLib.TTLibError, "unknown table version: 0x%8x" % self.version
|
||||||
|
|
||||||
|
self.scriptList = reader.readTable(ScriptList, otFont, self.tableTag)
|
||||||
|
self.featureList = reader.readTable(FeatureList, otFont, self.tableTag)
|
||||||
|
self.lookupList = reader.readTable(LookupList, otFont, self.tableTag)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
names = [("ScriptList", "scriptList"),
|
||||||
|
("FeatureList", "featureList"),
|
||||||
|
("LookupList", "lookupList")]
|
||||||
|
for name, attr in names:
|
||||||
|
xmlWriter.newline()
|
||||||
|
xmlWriter.begintag(name)
|
||||||
|
xmlWriter.newline()
|
||||||
|
table = getattr(self, attr)
|
||||||
|
table.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag(name)
|
||||||
|
xmlWriter.newline()
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Script List and subtables
|
||||||
|
#
|
||||||
|
|
||||||
|
class ScriptList:
|
||||||
|
|
||||||
|
def __init__(self, parentTag):
|
||||||
|
self.parentTag = parentTag
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
scriptCount = reader.readUShort()
|
||||||
|
self.scripts = reader.readTagList(scriptCount, Script, otFont)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
for tag, script in self.scripts:
|
||||||
|
xmlWriter.begintag("Script", tag=tag)
|
||||||
|
xmlWriter.newline()
|
||||||
|
script.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag("Script")
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
class Script:
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
self.defaultLangSystem = None
|
||||||
|
self.defaultLangSystem = reader.readTable(LanguageSystem, otFont)
|
||||||
|
langSysCount = reader.readUShort()
|
||||||
|
self.languageSystems = reader.readTagList(langSysCount, LanguageSystem, otFont)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
xmlWriter.begintag("DefaultLanguageSystem")
|
||||||
|
xmlWriter.newline()
|
||||||
|
self.defaultLangSystem.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag("DefaultLanguageSystem")
|
||||||
|
xmlWriter.newline()
|
||||||
|
for tag, langSys in self.languageSystems:
|
||||||
|
xmlWriter.begintag("LanguageSystem", tag=tag)
|
||||||
|
xmlWriter.newline()
|
||||||
|
langSys.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag("LanguageSystem")
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
class LanguageSystem:
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
self.lookupOrder = reader.readUShort()
|
||||||
|
self.reqFeatureIndex = reader.readUShort()
|
||||||
|
featureCount = reader.readUShort()
|
||||||
|
self.featureIndex = reader.readUShortArray(featureCount)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
xmlWriter.simpletag("LookupOrder", value=self.lookupOrder)
|
||||||
|
xmlWriter.newline()
|
||||||
|
xmlWriter.simpletag("ReqFeature", index=hex(self.reqFeatureIndex))
|
||||||
|
xmlWriter.newline()
|
||||||
|
for index in self.featureIndex:
|
||||||
|
xmlWriter.simpletag("Feature", index=index)
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Feature List and subtables
|
||||||
|
#
|
||||||
|
|
||||||
|
class FeatureList:
|
||||||
|
|
||||||
|
def __init__(self, parentTag):
|
||||||
|
self.parentTag = parentTag
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
featureCount = reader.readUShort()
|
||||||
|
self.features = reader.readTagList(featureCount, Feature, otFont)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
for index in range(len(self.features)):
|
||||||
|
tag, feature = self.features[index]
|
||||||
|
xmlWriter.begintag("Feature", index=index, tag=tag)
|
||||||
|
xmlWriter.newline()
|
||||||
|
feature.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag("Feature")
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
class Feature:
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
self.featureParams = reader.readUShort()
|
||||||
|
lookupCount = reader.readUShort()
|
||||||
|
self.lookupListIndex = reader.readUShortArray(lookupCount)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
xmlWriter.simpletag("FeatureParams", value=hex(self.featureParams))
|
||||||
|
xmlWriter.newline()
|
||||||
|
for lookupIndex in self.lookupListIndex:
|
||||||
|
xmlWriter.simpletag("LookupTable", index=lookupIndex)
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Lookup List and subtables
|
||||||
|
#
|
||||||
|
|
||||||
|
class LookupList:
|
||||||
|
|
||||||
|
def __init__(self, parentTag):
|
||||||
|
self.parentTag = parentTag
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
lookupCount = reader.readUShort()
|
||||||
|
self.lookup = lookup = []
|
||||||
|
for i in range(lookupCount):
|
||||||
|
lookup.append(reader.readTable(LookupTable, otFont, self.parentTag))
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXX
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
for i in range(len(self.lookup)):
|
||||||
|
xmlWriter.newline()
|
||||||
|
lookupTable = self.lookup[i]
|
||||||
|
xmlWriter.begintag("LookupTable", index=i)
|
||||||
|
xmlWriter.newline()
|
||||||
|
lookupTable.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag("LookupTable")
|
||||||
|
xmlWriter.newline()
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
class LookupTable:
|
||||||
|
|
||||||
|
def __init__(self, parentTag):
|
||||||
|
self.parentTag = parentTag
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
parentTable = otFont[self.parentTag]
|
||||||
|
self.lookupType = reader.readUShort()
|
||||||
|
self.lookupFlag = reader.readUShort()
|
||||||
|
subTableCount = reader.readUShort()
|
||||||
|
self.subTables = subTables = []
|
||||||
|
lookupTypeClass = parentTable.getLookupTypeClass(self.lookupType)
|
||||||
|
for i in range(subTableCount):
|
||||||
|
subTables.append(reader.readTable(lookupTypeClass, otFont))
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
XXXXXX
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if not hasattr(self, "lookupTypeName"):
|
||||||
|
m = ttLib.getTableModule(self.parentTag)
|
||||||
|
self.lookupTypeName = m.lookupTypeClasses[self.lookupType].__name__
|
||||||
|
return "<%s LookupTable at %x>" % (self.lookupTypeName, id(self))
|
||||||
|
|
||||||
|
def toXML(self, xmlWriter, otFont):
|
||||||
|
xmlWriter.simpletag("LookupFlag", value=hex(self.lookupFlag))
|
||||||
|
xmlWriter.newline()
|
||||||
|
for subTable in self.subTables:
|
||||||
|
name = subTable.__class__.__name__
|
||||||
|
xmlWriter.begintag(name)
|
||||||
|
xmlWriter.newline()
|
||||||
|
subTable.toXML(xmlWriter, otFont)
|
||||||
|
xmlWriter.endtag(name)
|
||||||
|
xmlWriter.newline()
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), otFont):
|
||||||
|
xxx
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Other common formats
|
||||||
|
#
|
||||||
|
|
||||||
|
class CoverageTable:
|
||||||
|
|
||||||
|
def getGlyphIDs(self):
|
||||||
|
return self.glyphIDs
|
||||||
|
|
||||||
|
def getGlyphNames(self):
|
||||||
|
return self.glyphNames
|
||||||
|
|
||||||
|
def makeGlyphNames(self, otFont):
|
||||||
|
self.glyphNames = map(lambda i, o=otFont.getGlyphOrder(): o[i], self.glyphIDs)
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
format = reader.readUShort()
|
||||||
|
if format == 1:
|
||||||
|
self.decompileFormat1(reader, otFont)
|
||||||
|
elif format == 2:
|
||||||
|
self.decompileFormat2(reader, otFont)
|
||||||
|
else:
|
||||||
|
raise ttLib.TTLibError, "unknown Coverage table format: %d" % format
|
||||||
|
self.makeGlyphNames(otFont)
|
||||||
|
|
||||||
|
def decompileFormat1(self, reader, otFont):
|
||||||
|
glyphCount = reader.readUShort()
|
||||||
|
self.glyphIDs = glyphIDs = []
|
||||||
|
for i in range(glyphCount):
|
||||||
|
glyphID = reader.readUShort()
|
||||||
|
glyphIDs.append(glyphID)
|
||||||
|
|
||||||
|
def decompileFormat2(self, reader, otFont):
|
||||||
|
rangeCount = reader.readUShort()
|
||||||
|
self.glyphIDs = glyphIDs = []
|
||||||
|
for i in range(rangeCount):
|
||||||
|
startID = reader.readUShort()
|
||||||
|
endID = reader.readUShort()
|
||||||
|
startCoverageIndex = reader.readUShort()
|
||||||
|
for glyphID in range(startID, endID + 1):
|
||||||
|
glyphIDs.append(glyphID)
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
# brute force ;-)
|
||||||
|
data1 = self.compileFormat1(otFont)
|
||||||
|
data2 = self.compileFormat2(otFont)
|
||||||
|
if len(data1) <= len(data2):
|
||||||
|
format = 1
|
||||||
|
reader = data1
|
||||||
|
else:
|
||||||
|
format = 2
|
||||||
|
reader = data2
|
||||||
|
return struct.pack(">H", format) + reader
|
||||||
|
|
||||||
|
def compileFormat1(self, otFont):
|
||||||
|
xxxxx
|
||||||
|
glyphIDs = map(otFont.getGlyphID, self.glyphNames)
|
||||||
|
data = pack(">H", len(glyphIDs))
|
||||||
|
pack = struct.pack
|
||||||
|
for glyphID in glyphIDs:
|
||||||
|
data = data + pack(">H", glyphID)
|
||||||
|
return data
|
||||||
|
|
||||||
|
def compileFormat2(self, otFont):
|
||||||
|
xxxxx
|
||||||
|
glyphIDs = map(otFont.getGlyphID, self.glyphNames)
|
||||||
|
ranges = []
|
||||||
|
lastID = startID = glyphIDs[0]
|
||||||
|
startCoverageIndex = 0
|
||||||
|
glyphCount = len(glyphIDs)
|
||||||
|
for i in range(1, glyphCount+1):
|
||||||
|
if i == glyphCount:
|
||||||
|
glyphID = 0x1ffff # arbitrary, but larger than 0x10000
|
||||||
|
else:
|
||||||
|
glyphID = glyphIDs[i]
|
||||||
|
if glyphID <> (lastID + 1):
|
||||||
|
ranges.append((startID, lastID, startCoverageIndex))
|
||||||
|
startCoverageIndex = i
|
||||||
|
startID = glyphID
|
||||||
|
lastID = glyphID
|
||||||
|
ranges.sort() # sort by startID
|
||||||
|
rangeData = ""
|
||||||
|
for startID, endID, startCoverageIndex in ranges:
|
||||||
|
rangeData = rangeData + struct.pack(">HHH", startID, endID, startCoverageIndex)
|
||||||
|
return pack(">H", len(ranges)) + rangeData
|
||||||
|
|
||||||
|
|
||||||
|
class ClassDefinitionTable:
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
format = reader.readUShort()
|
||||||
|
if format == 1:
|
||||||
|
self.decompileFormat1(reader, otFont)
|
||||||
|
elif format == 2:
|
||||||
|
self.decompileFormat2(reader, otFont)
|
||||||
|
else:
|
||||||
|
raise ttLib.TTLibError, "unknown Class table format: %d" % format
|
||||||
|
self.reverse()
|
||||||
|
|
||||||
|
def reverse(self):
|
||||||
|
classDefs = {}
|
||||||
|
for glyphName, classCode in self.classDefs:
|
||||||
|
try:
|
||||||
|
classDefs[classCode].append(glyphName)
|
||||||
|
except KeyError:
|
||||||
|
classDefs[classCode] = [glyphName]
|
||||||
|
self.classDefs = classDefs
|
||||||
|
|
||||||
|
def decompileFormat1(self, reader, otFont):
|
||||||
|
self.classDefs = classDefs = []
|
||||||
|
startGlyphID = reader.readUShort()
|
||||||
|
glyphCount = reader.readUShort()
|
||||||
|
for i in range(glyphCount):
|
||||||
|
glyphName = otFont.getglyphName(startGlyphID + i)
|
||||||
|
classValue = reader.readUShort()
|
||||||
|
if classValue:
|
||||||
|
classDefs.append((glyphName, classValue))
|
||||||
|
|
||||||
|
def decompileFormat2(self, reader, otFont):
|
||||||
|
self.classDefs = classDefs = []
|
||||||
|
classRangeCount = reader.readUShort()
|
||||||
|
for i in range(classRangeCount):
|
||||||
|
startID = reader.readUShort()
|
||||||
|
endID = reader.readUShort()
|
||||||
|
classValue = reader.readUShort()
|
||||||
|
for glyphID in range(startID, endID + 1):
|
||||||
|
if classValue:
|
||||||
|
glyphName = otFont.getGlyphName(glyphID)
|
||||||
|
classDefs.append((glyphName, classValue))
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
# brute force again
|
||||||
|
data1 = self.compileFormat1(otFont)
|
||||||
|
data2 = self.compileFormat2(otFont)
|
||||||
|
if len(data1) <= len(data2):
|
||||||
|
format = 1
|
||||||
|
data = data1
|
||||||
|
else:
|
||||||
|
format = 2
|
||||||
|
data = data2
|
||||||
|
return struct.pack(">H", format) + data
|
||||||
|
|
||||||
|
def compileFormat1(self, otFont):
|
||||||
|
items = map(lambda (glyphName, classValue), getGlyphID=otFont.getGlyphID:
|
||||||
|
(getGlyphID(glyphName), classValue), self.glyphs.items())
|
||||||
|
items.sort()
|
||||||
|
startGlyphID = items[0][0]
|
||||||
|
endGlyphID = items[-1][0]
|
||||||
|
data = ""
|
||||||
|
lastID = startGlyphID
|
||||||
|
for glyphID, classValue in items:
|
||||||
|
for i in range(lastID + 1, glyphID - 1):
|
||||||
|
data = data + "\0\0" # 0 == default class
|
||||||
|
data = data + struct.pack(">H", classValue)
|
||||||
|
lastID = glyphID
|
||||||
|
return struct.pack(">H", endGlyphID - startGlyphID + 1) + data
|
||||||
|
|
||||||
|
def compileFormat2(self, otFont):
|
||||||
|
items = map(lambda (glyphName, classValue), getGlyphID=otFont.getGlyphID:
|
||||||
|
(getGlyphID(glyphName), classValue), self.glyphs.items())
|
||||||
|
items.sort()
|
||||||
|
ranges = []
|
||||||
|
lastID, lastClassValue = items[0][0]
|
||||||
|
startID = lastID
|
||||||
|
itemCount = len(items)
|
||||||
|
for i in range(1, itemCount+1):
|
||||||
|
if i == itemCount:
|
||||||
|
glyphID = 0x1ffff # arbitrary, but larger than 0x10000
|
||||||
|
classValue = 0
|
||||||
|
else:
|
||||||
|
glyphID, classValue = items[i]
|
||||||
|
if glyphID <> (lastID + 1) or lastClassValue <> classValue:
|
||||||
|
ranges.append((startID, lastID, lastClassValue))
|
||||||
|
startID = glyphID
|
||||||
|
lastClassValue = classValue
|
||||||
|
lastID = glyphID
|
||||||
|
lastClassValue = classValue
|
||||||
|
rangeData = ""
|
||||||
|
for startID, endID, classValue in ranges:
|
||||||
|
rangeData = rangeData + struct.pack(">HHH", startID, endID, classValue)
|
||||||
|
return pack(">H", len(ranges)) + rangeData
|
||||||
|
|
||||||
|
def __getitem__(self, glyphName):
|
||||||
|
if self.glyphs.has_key(glyphName):
|
||||||
|
return self.glyphs[glyphName]
|
||||||
|
else:
|
||||||
|
return 0 # default class
|
||||||
|
|
||||||
|
|
||||||
|
class DeviceTable:
|
||||||
|
|
||||||
|
def decompile(self, reader, otFont):
|
||||||
|
xxxxxx
|
||||||
|
self.startSize = unpack_uint16(reader[:2])
|
||||||
|
endSize = unpack_uint16(reader[2:4])
|
||||||
|
deltaFormat = unpack_uint16(reader[4:6])
|
||||||
|
reader = reader[6:]
|
||||||
|
if deltaFormat == 1:
|
||||||
|
bits = 2
|
||||||
|
elif deltaFormat == 2:
|
||||||
|
bits = 4
|
||||||
|
elif deltaFormat == 3:
|
||||||
|
bits = 8
|
||||||
|
else:
|
||||||
|
raise ttLib.TTLibError, "unknown Device table delta format: %d" % deltaFormat
|
||||||
|
numCount = 16 / bits
|
||||||
|
deltaCount = endSize - self.startSize + 1
|
||||||
|
deltaValues = []
|
||||||
|
mask = (1 << bits) - 1
|
||||||
|
threshold = (1 << bits) / 2
|
||||||
|
shift = 1 << bits
|
||||||
|
for i in range(0, deltaCount, numCount):
|
||||||
|
offset = 2*i/numCount
|
||||||
|
chunk = unpack_uint16(reader[offset:offset+2])
|
||||||
|
deltas = []
|
||||||
|
for j in range(numCount):
|
||||||
|
delta = chunk & mask
|
||||||
|
if delta >= threshold:
|
||||||
|
delta = delta - shift
|
||||||
|
deltas.append(delta)
|
||||||
|
chunk = chunk >> bits
|
||||||
|
deltas.reverse()
|
||||||
|
deltaValues = deltaValues + deltas
|
||||||
|
self.deltaValues = deltaValues[:deltaCount]
|
||||||
|
|
||||||
|
def compile(self, otFont):
|
||||||
|
deltaValues = self.deltaValues
|
||||||
|
startSize = self.startSize
|
||||||
|
endSize = startSize + len(deltaValues) - 1
|
||||||
|
smallestDelta = min(deltas)
|
||||||
|
largestDelta = ma(deltas)
|
||||||
|
if smallestDelta >= -2 and largestDelta < 2:
|
||||||
|
deltaFormat = 1
|
||||||
|
bits = 2
|
||||||
|
elif smallestDelta >= -8 and largestDelta < 8:
|
||||||
|
deltaFormat = 2
|
||||||
|
bits = 4
|
||||||
|
elif smallestDelta >= -128 and largestDelta < 128:
|
||||||
|
deltaFormat = 3
|
||||||
|
bits = 8
|
||||||
|
else:
|
||||||
|
raise ttLib.TTLibError, "delta value too large: min=%d, max=%d" % (smallestDelta, largestDelta)
|
||||||
|
data = struct.pack(">HHH", startSize, endSize, deltaFormat)
|
||||||
|
numCount = 16 / bits
|
||||||
|
# pad the list to a multiple of numCount values
|
||||||
|
remainder = len(deltaValues) % numCount
|
||||||
|
if remainder:
|
||||||
|
deltaValues = deltaValues + [0] * (numCount - remainder)
|
||||||
|
deltaData = ""
|
||||||
|
for i in range(0, len(deltaValues), numCount):
|
||||||
|
chunk = 0
|
||||||
|
for j in range(numCount):
|
||||||
|
chunk = chunk << bits
|
||||||
|
chunk = chunk | deltaValues[i+j]
|
||||||
|
deltaData = deltaData + struct.pack(">H", chunk)
|
||||||
|
return data + deltaData
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Miscelaneous helper routines and classes
|
||||||
|
#
|
||||||
|
|
||||||
|
class OTTableReader:
|
||||||
|
|
||||||
|
"""Data wrapper, mostly designed to make reading OT data less cumbersome."""
|
||||||
|
|
||||||
|
def __init__(self, data, offset=0):
|
||||||
|
self.data = data
|
||||||
|
self.offset = offset
|
||||||
|
self.pos = offset
|
||||||
|
|
||||||
|
def readUShort(self):
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + 2
|
||||||
|
value = int(struct.unpack(">H", self.data[pos:newpos])[0])
|
||||||
|
self.pos = newpos
|
||||||
|
return value
|
||||||
|
|
||||||
|
readOffset = readUShort
|
||||||
|
|
||||||
|
def readShort(self):
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + 2
|
||||||
|
value = int(struct.unpack(">h", self.data[pos:newpos])[0])
|
||||||
|
self.pos = newpos
|
||||||
|
return value
|
||||||
|
|
||||||
|
def readLong(self):
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + 4
|
||||||
|
value = int(struct.unpack(">l", self.data[pos:newpos])[0])
|
||||||
|
self.pos = newpos
|
||||||
|
return value
|
||||||
|
|
||||||
|
def readTag(self):
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + 4
|
||||||
|
value = self.data[pos:newpos]
|
||||||
|
assert len(value) == 4
|
||||||
|
self.pos = newpos
|
||||||
|
return value
|
||||||
|
|
||||||
|
def readUShortArray(self, count):
|
||||||
|
return self.readArray(count, "H")
|
||||||
|
|
||||||
|
readOffsetArray = readUShortArray
|
||||||
|
|
||||||
|
def readShortArray(self, count):
|
||||||
|
return self.readArray(count, "h")
|
||||||
|
|
||||||
|
def readArray(self, count, format):
|
||||||
|
assert format in "Hh"
|
||||||
|
from array import array
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + 2 * count
|
||||||
|
a = array(format)
|
||||||
|
a.fromstring(self.data[pos:newpos])
|
||||||
|
if ttLib.endian <> 'big':
|
||||||
|
a.byteswap()
|
||||||
|
self.pos = newpos
|
||||||
|
return a.tolist()
|
||||||
|
|
||||||
|
def readTable(self, tableClass, otFont, *args):
|
||||||
|
offset = self.readOffset()
|
||||||
|
if offset == 0:
|
||||||
|
return None
|
||||||
|
newReader = self.getSubString(offset)
|
||||||
|
table = apply(tableClass, args)
|
||||||
|
table.decompile(newReader, otFont)
|
||||||
|
return table
|
||||||
|
|
||||||
|
def readTableArray(self, count, tableClass, otFont, *args):
|
||||||
|
list = []
|
||||||
|
for i in range(count):
|
||||||
|
list.append(apply(self.readTable, (tableClass, otFont) + args))
|
||||||
|
return list
|
||||||
|
|
||||||
|
def readTagList(self, count, tableClass, otFont, *args):
|
||||||
|
list = []
|
||||||
|
for i in range(count):
|
||||||
|
tag = self.readTag()
|
||||||
|
table = apply(self.readTable, (tableClass, otFont) + args)
|
||||||
|
list.append((tag, table))
|
||||||
|
return list
|
||||||
|
|
||||||
|
def readStruct(self, format, size=None):
|
||||||
|
if size is None:
|
||||||
|
size = struct.calcsize(format)
|
||||||
|
else:
|
||||||
|
assert size == struct.calcsize(format)
|
||||||
|
pos = self.pos
|
||||||
|
newpos = pos + size
|
||||||
|
values = struct.unpack(format, self.data[pos:newpos])
|
||||||
|
self.pos = newpos
|
||||||
|
return values
|
||||||
|
|
||||||
|
def getSubString(self, offset):
|
||||||
|
return self.__class__(self.data, self.offset+offset)
|
||||||
|
|
||||||
|
def seek(self, n):
|
||||||
|
"""Relative seek."""
|
||||||
|
self.pos = self.pos + n
|
||||||
|
|
||||||
|
|
91
Lib/fontTools/ttLib/tables/table_API_readme.txt
Normal file
91
Lib/fontTools/ttLib/tables/table_API_readme.txt
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
This folder is a subpackage of ttLib. Each module here is a
|
||||||
|
specialized TT/OT table converter: they can convert raw data
|
||||||
|
to Python objects and vice versa. Usually you don't need to
|
||||||
|
use the modules directly: they are imported and used
|
||||||
|
automatically when needed by ttLib.
|
||||||
|
|
||||||
|
If you are writing you own table converter the following is
|
||||||
|
important.
|
||||||
|
|
||||||
|
The modules here have pretty strange names: this is due to the
|
||||||
|
fact that we need to map TT table tags (which are case sensitive)
|
||||||
|
to filenames (which on Mac and Win aren't case sensitive) as well
|
||||||
|
as to Python identifiers. The latter means it can only contain
|
||||||
|
[A-Za-z0-9_] and cannot start with a number.
|
||||||
|
|
||||||
|
ttLib provides functions to expand a tag into the format used here:
|
||||||
|
|
||||||
|
>>> from fontTools import ttLib
|
||||||
|
>>> ttLib.tag2identifier("FOO ")
|
||||||
|
'F_O_O_'
|
||||||
|
>>> ttLib.tag2identifier("cvt ")
|
||||||
|
'_c_v_t'
|
||||||
|
>>> ttLib.tag2identifier("OS/2")
|
||||||
|
'O_S_2f_2'
|
||||||
|
>>> ttLib.tag2identifier("glyf")
|
||||||
|
'_g_l_y_f'
|
||||||
|
>>>
|
||||||
|
|
||||||
|
And vice versa:
|
||||||
|
|
||||||
|
>>> ttLib.identifier2tag("F_O_O_")
|
||||||
|
'FOO '
|
||||||
|
>>> ttLib.identifier2tag("_c_v_t")
|
||||||
|
'cvt '
|
||||||
|
>>> ttLib.identifier2tag("O_S_2f_2")
|
||||||
|
'OS/2'
|
||||||
|
>>> ttLib.identifier2tag("_g_l_y_f")
|
||||||
|
'glyf'
|
||||||
|
>>>
|
||||||
|
|
||||||
|
Eg. the 'glyf' table converter lives in a Python file called:
|
||||||
|
|
||||||
|
_g_l_y_f.py
|
||||||
|
|
||||||
|
The converter itself is a class, named "table_" + expandedtag. Eg:
|
||||||
|
|
||||||
|
class table__g_l_y_f:
|
||||||
|
etc.
|
||||||
|
|
||||||
|
Note that if you _do_ need to use such modules or classes manually,
|
||||||
|
there are two convenient API functions that let you find them by tag:
|
||||||
|
|
||||||
|
>>> ttLib.getTableModule('glyf')
|
||||||
|
<module 'ttLib.tables._g_l_y_f'>
|
||||||
|
>>> ttLib.getTableClass('glyf')
|
||||||
|
<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400>
|
||||||
|
>>>
|
||||||
|
|
||||||
|
You must subclass from DefaultTable.DefaultTable. It provides some default
|
||||||
|
behavior, as well as a constructor method (__init__) that you don't need to
|
||||||
|
override.
|
||||||
|
|
||||||
|
Your converter should minimally provide two methods:
|
||||||
|
|
||||||
|
class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO '
|
||||||
|
|
||||||
|
def decompile(self, data, ttFont):
|
||||||
|
# 'data' is the raw table data. Unpack it into a
|
||||||
|
# Python data structure.
|
||||||
|
# 'ttFont' is a ttLib.TTfile instance, enabling you to
|
||||||
|
# refer to other tables. Do ***not*** keep a reference to
|
||||||
|
# it: it will cause a circular reference (ttFont saves
|
||||||
|
# a reference to us), and that means we'll be leaking
|
||||||
|
# memory. If you need to use it in other methods, just
|
||||||
|
# pass it around as a method argument.
|
||||||
|
|
||||||
|
def compile(self, ttFont):
|
||||||
|
# Return the raw data, as converted from the Python
|
||||||
|
# data structure.
|
||||||
|
# Again, 'ttFont' is there so you can access other tables.
|
||||||
|
# Same warning applies.
|
||||||
|
|
||||||
|
If you want to support XML import/export as well, you need to provide two
|
||||||
|
additional methods:
|
||||||
|
|
||||||
|
def toXML(self, writer, ttFont):
|
||||||
|
# XXX
|
||||||
|
|
||||||
|
def fromXML(self, (name, attrs, content), ttFont):
|
||||||
|
# XXX
|
||||||
|
|
249
Lib/fontTools/ttLib/tables/ttProgram.py
Normal file
249
Lib/fontTools/ttLib/tables/ttProgram.py
Normal file
@ -0,0 +1,249 @@
|
|||||||
|
"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
|
||||||
|
|
||||||
|
import array
|
||||||
|
|
||||||
|
|
||||||
|
# first, the list of instructions that eat bytes or words from the instruction stream
|
||||||
|
|
||||||
|
streamInstructions = [
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
# opcode mnemonic argbits descriptive name pops pushes eats from instruction stream pushes
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
(0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn
|
||||||
|
(0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn
|
||||||
|
(0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn
|
||||||
|
(0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# next, the list of "normal" instructions
|
||||||
|
|
||||||
|
instructions = [
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
# opcode mnemonic argbits descriptive name pops pushes pops pushes
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
(0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p -
|
||||||
|
(0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n|
|
||||||
|
(0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2)
|
||||||
|
(0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 -
|
||||||
|
(0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue -
|
||||||
|
(0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b
|
||||||
|
(0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f -
|
||||||
|
(0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n)
|
||||||
|
(0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek
|
||||||
|
(0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack -
|
||||||
|
(0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n -
|
||||||
|
(0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||||
|
(0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||||
|
(0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 -
|
||||||
|
(0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||||
|
(0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||||
|
(0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 -
|
||||||
|
(0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n
|
||||||
|
(0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2
|
||||||
|
(0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e
|
||||||
|
(0x59, 'EIF', 0, 'EndIf', 0, 0), # - -
|
||||||
|
(0x1b, 'ELSE', 0, 'Else', 0, 0), # - -
|
||||||
|
(0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - -
|
||||||
|
(0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b
|
||||||
|
(0x57, 'EVEN', 0, 'Even', 1, 1), # e b
|
||||||
|
(0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f -
|
||||||
|
(0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - -
|
||||||
|
(0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - -
|
||||||
|
(0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue -
|
||||||
|
(0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l -
|
||||||
|
(0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l -
|
||||||
|
(0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n)
|
||||||
|
(0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c
|
||||||
|
(0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result
|
||||||
|
(0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py
|
||||||
|
(0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py
|
||||||
|
(0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b
|
||||||
|
(0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b
|
||||||
|
(0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f -
|
||||||
|
(0x58, 'IF', 0, 'If', 1, 0), # e -
|
||||||
|
(0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v -
|
||||||
|
(0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue -
|
||||||
|
(0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p -
|
||||||
|
(0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - -
|
||||||
|
(0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset -
|
||||||
|
(0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset -
|
||||||
|
(0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset -
|
||||||
|
(0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count -
|
||||||
|
(0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b
|
||||||
|
(0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b
|
||||||
|
(0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2)
|
||||||
|
(0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d
|
||||||
|
(0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p -
|
||||||
|
(0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p -
|
||||||
|
(0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p -
|
||||||
|
(0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2)
|
||||||
|
(0x26, 'MINDEX', 0, 'MoveXToTopStack', 2, 1), # k ek
|
||||||
|
(0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 1, 0), # n, p -
|
||||||
|
(0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem
|
||||||
|
(0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize
|
||||||
|
(0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p -
|
||||||
|
(0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64
|
||||||
|
(0x65, 'NEG', 0, 'Negate', 1, 1), # n -n
|
||||||
|
(0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b
|
||||||
|
(0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e )
|
||||||
|
(0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2
|
||||||
|
(0x56, 'ODD', 0, 'Odd', 1, 1), # e b
|
||||||
|
(0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b
|
||||||
|
(0x21, 'POP', 0, 'PopTopStack', 1, 0), # e -
|
||||||
|
(0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value
|
||||||
|
(0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - -
|
||||||
|
(0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - -
|
||||||
|
(0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c
|
||||||
|
(0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2
|
||||||
|
(0x43, 'RS', 0, 'ReadStore', 1, 1), # n v
|
||||||
|
(0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - -
|
||||||
|
(0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - -
|
||||||
|
(0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - -
|
||||||
|
(0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - -
|
||||||
|
(0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n -
|
||||||
|
(0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight -
|
||||||
|
(0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n -
|
||||||
|
(0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n -
|
||||||
|
(0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p -
|
||||||
|
(0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n -
|
||||||
|
(0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n -
|
||||||
|
(0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 -
|
||||||
|
(0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n -
|
||||||
|
(0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x -
|
||||||
|
(0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - -
|
||||||
|
(0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 -
|
||||||
|
(0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - -
|
||||||
|
(0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c -
|
||||||
|
(0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue -
|
||||||
|
(0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue -
|
||||||
|
(0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e -
|
||||||
|
(0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n -
|
||||||
|
(0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance -
|
||||||
|
(0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x -
|
||||||
|
(0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - -
|
||||||
|
(0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 -
|
||||||
|
(0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n -
|
||||||
|
(0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p -
|
||||||
|
(0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p -
|
||||||
|
(0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p -
|
||||||
|
(0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n -
|
||||||
|
(0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n -
|
||||||
|
(0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2)
|
||||||
|
(0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - -
|
||||||
|
(0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2
|
||||||
|
(0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n -
|
||||||
|
(0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n -
|
||||||
|
(0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n -
|
||||||
|
(0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n -
|
||||||
|
(0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p -
|
||||||
|
(0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l -
|
||||||
|
(0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l -
|
||||||
|
(0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l -
|
||||||
|
# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- --------------
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def bitRepr(value, bits):
	"""Return the lowest 'bits' bits of 'value' as a binary digit string,
	most significant bit first (e.g. bitRepr(5, 4) -> "0101")."""
	digits = []
	for _ in range(bits):
		# collect digits least-significant first, reverse at the end
		digits.append("01"[value & 0x1])
		value = value >> 1
	digits.reverse()
	return "".join(digits)
|
|
||||||
|
def makeOpcodeDict(instructionList):
	"""Build a lookup table mapping each opcode byte to a
	(mnemonic, argbits, argoffset, name) tuple.

	Instructions with 'argbits' > 0 encode a small argument in the low
	bits of the opcode itself, so they occupy a contiguous range of
	2**argbits opcode values; every value in the range maps to the same
	mnemonic, with 'argoffset' recording the base opcode.
	"""
	table = {}
	for op, mnemonic, argbits, name, pops, pushes in instructionList:
		if not argbits:
			table[op] = mnemonic, 0, 0, name
			continue
		base = op
		for variant in range(1 << argbits):
			table[base + variant] = mnemonic, argbits, base, name
	return table
|
|
||||||
|
# Opcode lookup tables: opcode byte -> (mnemonic, argbits, argoffset, name).
# streamOpcodeDict covers the push instructions (which consume inline data
# from the instruction stream); opcodeDict covers all other instructions.
streamOpcodeDict = makeOpcodeDict(streamInstructions)
opcodeDict = makeOpcodeDict(instructions)

# String-based exception (pre-exception-class idiom of this code's era),
# raised when an unknown opcode is encountered.
tt_instructions_error = "TT instructions error"
||||||
|
class Program:
	"""A TrueType instruction program, held either as raw bytecode
	(an array of unsigned bytes) or as a disassembled listing (a list
	of mnemonic strings). Conversion between the two representations
	happens lazily in getBytecode()/getAssembly()."""

	def __init__(self):
		pass

	def fromBytecode(self, bytecode):
		# Store the raw instruction stream as an unsigned byte array.
		self.bytecode = array.array("B")
		self.bytecode.fromstring(bytecode)

	def fromAssembly(self, assembly):
		# 'assembly' is a list of instruction strings (see _disassemble).
		self.assembly = assembly

	def getBytecode(self):
		# Lazily assemble if we only hold the assembly form.
		if not hasattr(self, "bytecode"):
			self._assemble()
		return self.bytecode.tostring()

	def getAssembly(self):
		# Lazily disassemble if we only hold the bytecode form.
		if not hasattr(self, "assembly"):
			self._disassemble()
		return self.assembly

	def _assemble(self):
		# XXX not implemented: evaluating the bare name 'xxx' raises
		# NameError at runtime, serving as a placeholder failure.
		xxx

	def _disassemble(self):
		# Walk the bytecode and produce a list of instruction strings.
		# Push instructions (found via streamOpcodeDict) consume their
		# inline arguments from the stream; all other instructions encode
		# any argument in the low bits of the opcode itself.
		assembly = []
		i = 0
		bytecode = self.bytecode
		numBytecode = len(bytecode)
		while i < numBytecode:
			op = bytecode[i]
			arg = 0
			try:
				mnemonic, argbits, argoffset, name = opcodeDict[op]
			except KeyError:
				# Not a regular instruction: try the push instructions.
				try:
					mnemonic, argbits, argoffset, name = streamOpcodeDict[op]
				except KeyError:
					raise tt_instructions_error, "illegal opcode: 0x%.2x" % op
				pushbytes = pushwords = 0
				if argbits:
					# PUSHB[n]/PUSHW[n]: count encoded in the opcode.
					if mnemonic == "PUSHB":
						pushbytes = op - argoffset + 1
					else:
						pushwords = op - argoffset + 1
				else:
					# NPUSHB/NPUSHW: count is the next byte in the stream.
					i = i + 1
					if mnemonic == "NPUSHB":
						pushbytes = bytecode[i]
					else:
						pushwords = bytecode[i]
				i = i + 1
				assembly.append(mnemonic + "[ ]")
				for j in range(pushbytes):
					assembly.append(`bytecode[i]`)
					i = i + 1
				# NOTE(review): this loop steps by 2 over 'pushwords', so it
				# emits pushwords/2 words rather than pushwords — looks like
				# an off-by-factor bug; confirm against later fontTools.
				for j in range(0, pushwords, 2):
					assembly.append(`(bytecode[i] << 8) + bytecode[i+1]`)
					i = i + 2
			else:
				# Regular instruction: render any inline flag bits in brackets.
				if argbits:
					assembly.append(mnemonic + "[%s]" % bitRepr(op - argoffset, argbits))
				else:
					assembly.append(mnemonic + "[ ]")
				i = i + 1
		self.assembly = assembly
		# Drop the bytecode so the assembly becomes the sole representation.
		del self.bytecode
|
|
||||||
|
|
||||||
|
# Module self-test data: raw 'fpgm' (font program) and 'prep'-style bytecode
# snippets lifted from a real font, expressed with octal escapes.
fpgm = '@\01476&%\037\023\022\015\014\005\004\002, \260\003%E#E#ah\212 Eh \212#D`D-,KRXED\033!!Y-, EhD \260\001` E\260Fvh\030\212E`D-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\261\000\003%EhTX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Eh`\260\003%#D-,KRXED\033!!Y-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX?\033!\021Y-,KS#KQZX E\212`D\033!!Y-,KS#KQZX8\033!!Y-'

gpgm = '@\022\011\003\207@\005\200\004\207\000\010\007\202\001\010\004\202\000\010\000\020\320\355\020\336\355\001\020\336\375\032}\336\032\030\375\31610'

# Smoke test: disassemble the font program and print each instruction.
# Runs at import time (module doubles as a script in this era's style).
p = Program()
p.fromBytecode(fpgm)
for line in p.getAssembly():
	print line
|
||||||
|
|
332
Lib/fontTools/ttLib/test/ttBrowser.py
Normal file
332
Lib/fontTools/ttLib/test/ttBrowser.py
Normal file
@ -0,0 +1,332 @@
|
|||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.ttLib import macUtils
|
||||||
|
import macfs
|
||||||
|
import PyBrowser
|
||||||
|
import W, Lists
|
||||||
|
import os
|
||||||
|
import ATM
|
||||||
|
import Numeric
|
||||||
|
import Qd
|
||||||
|
from rf.views.wGlyphList import GlyphList
|
||||||
|
|
||||||
|
|
||||||
|
class TableBrowser:
	"""Classic-Mac GUI window for browsing the tables (and, when present,
	the glyph outlines) of one TrueType font. Depends on the Mac-only
	W widget toolkit, PyBrowser, and macfs — not portable."""

	def __init__(self, path=None, ttFont=None, res_index=None):
		# Either wrap an already-open TTFont (path is None) or open the
		# font file at 'path'; res_index selects a 'sfnt' resource when
		# reading from a resource fork.
		W.SetCursor('watch')
		if path is None:
			self.ttFont = ttFont
			self.filename = "????"
		else:
			self.ttFont = ttLib.TTFont(path, res_index)
			if res_index is None:
				self.filename = os.path.basename(path)
			else:
				self.filename = os.path.basename(path) + " - " + str(res_index)
		self.currentglyph = None
		self.glyphs = {}  # cache: glyph name -> Glyph instance
		self.buildinterface()

	def buildinterface(self):
		# Build the window: table list + buttons on the left, glyph list
		# and outline viewer on the right (when a 'glyf' table exists).
		buttonwidth = 120
		glyphlistwidth = 150
		hmargin = 10
		vmargin = 8
		title = self.filename
		tables = self.ttFont.keys()
		tables.sort()
		self.w = w = W.Window((500, 300), title, minsize = (400, 200))
		# NOTE: the 'Š' in button labels is the MacRoman ellipsis,
		# mis-rendered here; kept byte-for-byte.
		w.browsetablebutton = W.Button((hmargin, 32, buttonwidth, 16), "Browse tableŠ",
				self.browsetable)
		w.browsefontbutton = W.Button((hmargin, vmargin, buttonwidth, 16), "Browse fontŠ",
				self.browsefont)
		w.tablelist = W.List((hmargin, 56, buttonwidth, -128), tables, self.tablelisthit)

		w.divline1 = W.VerticalLine((buttonwidth + 2 * hmargin, vmargin, 1, -vmargin))

		gleft = buttonwidth + 3 * hmargin + 1

		hasGlyfTable = self.ttFont.has_key('glyf')

		glyphnames = self.ttFont.getGlyphNames2()  # caselessly sorted glyph names

		if hasGlyfTable:
			w.glyphlist = GlyphList((gleft, 56, glyphlistwidth, -vmargin),
					glyphnames, self.glyphlisthit)

			w.divline2 = W.VerticalLine((buttonwidth + glyphlistwidth + 4 * hmargin + 2,
					vmargin, 1, -vmargin))

			# Vertical extent of the viewer comes from the font's bbox.
			yMin = self.ttFont['head'].yMin
			yMax = self.ttFont['head'].yMax
			w.gviewer = GlyphViewer((buttonwidth + glyphlistwidth + 5 * hmargin + 3,
					vmargin, -hmargin, -vmargin), yMin, yMax)

			w.showpoints = W.CheckBox((gleft, vmargin, glyphlistwidth, 16), "Show points",
					self.w.gviewer.toggleshowpoints)
			w.showpoints.set(self.w.gviewer.showpoints)
			w.showlines = W.CheckBox((gleft, vmargin + 24, glyphlistwidth, 16), "Show lines",
					self.w.gviewer.toggleshowlines)
			w.showlines.set(self.w.gviewer.showlines)
		else:
			# No outlines to show; glyph list is display-only.
			w.glyphlist = GlyphList((gleft, 56, glyphlistwidth, -vmargin),
					glyphnames)
			w.noGlyphTable = W.TextBox((gleft, vmargin, -20, 20), "no 'glyf' table found")

		w.setdefaultbutton(w.browsetablebutton)

		w.tocurrentfont = W.Button((hmargin, -120, buttonwidth, 16), "Copy to current font", self.copytocurrentfont)
		w.fromcurrentfont = W.Button((hmargin, -96, buttonwidth, 16), "Copy from current font", self.copyfromcurrentfont)
		w.saveflat = W.Button((hmargin, -72, buttonwidth, 16), "Save as flat fileŠ", self.saveflat)
		w.savesuitcasebutton = W.Button((hmargin, -48, buttonwidth, 16), "Save as suitcaseŠ", self.savesuitcase)
		w.savexmlbutton = W.Button((hmargin, -24, buttonwidth, 16), "Save as XMLŠ", self.saveXML)

		w.open()
		# Disabled until a table is selected (see tablelisthit).
		w.browsetablebutton.enable(0)

	def browsetable(self):
		# Same action as double-clicking the table list.
		self.tablelisthit(1)

	def browsefont(self):
		# Open a generic object browser on the whole font.
		PyBrowser.Browser(self.ttFont)

	def copytocurrentfont(self):
		# Not implemented.
		pass

	def copyfromcurrentfont(self):
		# Not implemented.
		pass

	def saveflat(self):
		# Save as a plain (data-fork) TrueType file.
		path = putfile("Save font as flat file:", self.filename, ".TTF")
		if path:
			W.SetCursor('watch')
			self.ttFont.save(path)

	def savesuitcase(self):
		# Save into a Mac resource-fork suitcase (second arg = 1).
		path = putfile("Save font as suitcase:", self.filename, ".suit")
		if path:
			W.SetCursor('watch')
			self.ttFont.save(path, 1)

	def saveXML(self):
		# Dump the font to XML, showing a progress bar while writing.
		path = putfile("Save font as XML text file:", self.filename, ".xml")
		if path:
			W.SetCursor('watch')
			pb = macUtils.ProgressBar("Saving %s as XMLŠ" % self.filename)
			try:
				self.ttFont.saveXML(path, pb)
			finally:
				pb.close()

	def glyphlisthit(self, isDbl):
		# Show the selected glyph in the viewer, caching Glyph objects.
		sel = self.w.glyphlist.getselectedobjects()
		if not sel or sel[0] == self.currentglyph:
			return
		self.currentglyph = sel[0]
		if self.glyphs.has_key(self.currentglyph):
			g = self.glyphs[self.currentglyph]
		else:
			g = Glyph(self.ttFont, self.currentglyph)
			self.glyphs[self.currentglyph] = g
		self.w.gviewer.setglyph(g)

	def tablelisthit(self, isdbl):
		# Single click: enable/disable the "Browse table" button.
		# Double click: open a browser on each selected table; for 'glyf'
		# first touch every glyph so lazy loading is forced.
		if isdbl:
			for tag in self.w.tablelist.getselectedobjects():
				table = self.ttFont[tag]
				if tag == 'glyf':
					W.SetCursor('watch')
					for glyphname in self.ttFont.getGlyphOrder():
						try:
							glyph = table[glyphname]
						except KeyError:
							pass # incomplete font, oh well.
				PyBrowser.Browser(table)
		else:
			sel = self.w.tablelist.getselection()
			if sel:
				self.w.browsetablebutton.enable(1)
			else:
				self.w.browsetablebutton.enable(0)
|
|
||||||
|
|
||||||
|
class Glyph:
	"""Drawable snapshot of one 'glyf' outline: per-contour Numeric
	coordinate arrays plus on/off-curve flags, and the metrics needed
	to position it (width, lsb, xMin)."""

	def __init__(self, ttFont, glyphName):
		ttglyph = ttFont['glyf'][glyphName]
		# numberOfContours == -1 marks a composite glyph.
		self.iscomposite = ttglyph.numberOfContours == -1
		self.width, self.lsb = ttFont['hmtx'][glyphName]
		if ttglyph.numberOfContours == 0:
			# Empty glyph (e.g. space): no outline data at all.
			self.xMin = 0
			self.contours = []
			return
		self.xMin = ttglyph.xMin
		# getCoordinates resolves composites via the full 'glyf' table.
		coordinates, endPts, flags = ttglyph.getCoordinates(ttFont['glyf'])
		self.contours = []
		self.flags = []
		startpt = 0
		# Split the flat point list into per-contour slices; endPts holds
		# the inclusive index of each contour's last point.
		for endpt in endPts:
			self.contours.append(Numeric.array(coordinates[startpt:endpt+1]))
			self.flags.append(flags[startpt:endpt+1])
			startpt = endpt + 1

	def getcontours(self, scale, move):
		# Return [(scaled_points, flags), ...] with 'scale' applied
		# per-axis and 'move' added as an offset.
		contours = []
		for i in range(len(self.contours)):
			# NOTE(review): two-argument append is a Python 1.x quirk that
			# appends the args as a tuple — confirm intent before porting.
			contours.append((self.contours[i] * Numeric.array(scale) + move), self.flags[i])
		return contours
|
|
||||||
|
|
||||||
|
class GlyphViewer(W.Widget):
	"""Widget that renders one Glyph's outline with ATM fill calls and
	QuickDraw overlays (metrics lines, point markers)."""

	def __init__(self, possize, yMin, yMax):
		W.Widget.__init__(self, possize)
		self.glyph = None
		# Pad the vertical range by 2% on each side so outlines don't
		# touch the widget border.
		extra = 0.02 * (yMax-yMin)
		self.yMin, self.yMax = yMin - extra, yMax + extra
		self.showpoints = 1
		self.showlines = 1

	def toggleshowpoints(self, onoff):
		# Checkbox callback: redraw with/without point markers.
		self.showpoints = onoff
		self.SetPort()
		self.draw()

	def toggleshowlines(self, onoff):
		# Checkbox callback: redraw with/without metrics lines.
		self.showlines = onoff
		self.SetPort()
		self.draw()

	def setglyph(self, glyph):
		# Display a new Glyph instance.
		self.glyph = glyph
		self.SetPort()
		self.draw()

	def draw(self, visRgn=None):
		# This a HELL of a routine, but it's pretty damn fast...
		import Qd
		if not self._visible:
			return
		Qd.EraseRect(Qd.InsetRect(self._bounds, 1, 1))
		# Clip all drawing to the widget bounds, restoring the previous
		# clip region in the finally clause.
		cliprgn = Qd.NewRgn()
		savergn = Qd.NewRgn()
		Qd.RectRgn(cliprgn, self._bounds)
		Qd.GetClip(savergn)
		Qd.SetClip(cliprgn)
		try:
			if self.glyph:
				l, t, r, b = Qd.InsetRect(self._bounds, 1, 1)
				height = b - t
				# Map font units to pixels; y is flipped (screen y grows down).
				scale = float(height) / (self.yMax - self.yMin)
				topoffset = t + scale * self.yMax
				width = scale * self.glyph.width
				lsb = scale * self.glyph.lsb
				xMin = scale * self.glyph.xMin
				# XXXX this is not correct when USE_MY_METRICS is set in component!
				leftoffset = l + 0.5 * (r - l - width)
				gleftoffset = leftoffset - xMin + lsb
				if self.showlines:
					# Light gray metrics lines.
					Qd.RGBForeColor((0xafff, 0xafff, 0xafff))
					# left sidebearing
					Qd.MoveTo(leftoffset, t)
					Qd.LineTo(leftoffset, b - 1)
					# right sidebearing
					Qd.MoveTo(leftoffset + width, t)
					Qd.LineTo(leftoffset + width, b - 1)
					# baseline
					Qd.MoveTo(l, topoffset)
					Qd.LineTo(r - 1, topoffset)

					# origin
					Qd.RGBForeColor((0x5fff, 0, 0))
					Qd.MoveTo(gleftoffset, topoffset - 16)
					Qd.LineTo(gleftoffset, topoffset + 16)
					# reset color
					Qd.RGBForeColor((0, 0, 0))

				if self.glyph.iscomposite:
					# Composites are drawn in gray.
					Qd.RGBForeColor((0x7fff, 0x7fff, 0x7fff))

				# Fill the outline via ATM: walk each contour, emitting
				# line segments for on-curve points and quadratic curves
				# for off-curve control points (with implied on-curve
				# midpoints between consecutive off-curve points).
				ATM.startFillATM()
				contours = self.glyph.getcontours((scale, -scale), (gleftoffset, topoffset))
				for contour, flags in contours:
					currentpoint = None
					done_moveto = 0
					i = 0
					nPoints = len(contour)
					while i < nPoints:
						pt = contour[i]
						if flags[i]:
							# onCurve
							currentpoint = lineto(pt, done_moveto)
						else:
							if not currentpoint:
								# Contour starts off-curve: derive the start
								# point from the previous (wrapped) point.
								if not flags[i-1]:
									currentpoint = 0.5 * (contour[i-1] + pt)
								else:
									currentpoint = contour[i-1]
							if not flags[(i+1) % nPoints]:
								# Next point also off-curve: implied on-curve
								# midpoint ends this curve segment.
								endPt = 0.5 * (pt + contour[(i+1) % nPoints])
							else:
								endPt = contour[(i+1) % nPoints]
								# consume the explicit on-curve end point
								i = i + 1
							# offCurve
							currentpoint = qcurveto(currentpoint,
									pt, endPt, done_moveto)
						done_moveto = 1
						i = i + 1
					ATM.fillClosePathATM()
				ATM.endFillATM()
				# draw point markers
				if self.showpoints:
					for contour, flags in contours:
						# green markers: squares = on-curve, circles = off-curve
						Qd.RGBForeColor((0, 0xffff, 0))
						for i in range(len(contour)):
							(x, y) = contour[i]
							onCurve = flags[i] & 0x1
							if onCurve:
								Qd.PaintRect(Qd.InsetRect((x, y, x, y), -2, -2))
							else:
								Qd.PaintOval(Qd.InsetRect((x, y, x, y), -2, -2))
						# NOTE(review): this red color is set then immediately
						# overridden below — appears to be dead code; confirm.
						Qd.RGBForeColor((0xffff, 0, 0))
					Qd.RGBForeColor((0, 0, 0))
			Qd.FrameRect(self._bounds)
		finally:
			Qd.SetClip(savergn)
			Qd.DisposeRgn(cliprgn)
			Qd.DisposeRgn(savergn)
|
|
||||||
|
|
||||||
|
# Known font-file extensions; putfile() swaps any of these for the
# extension matching the chosen save format.
extensions = [".suit", ".xml", ".TTF", ".ttf"]

def putfile(prompt, filename, newextension):
	"""Show the Mac standard save dialog and return the chosen path,
	or None if the user cancelled. The suggested 'filename' gets
	'newextension', replacing any known font extension."""
	for ext in extensions:
		if filename[-len(ext):] == ext:
			filename = filename[:-len(ext)] + newextension
			break
	else:
		# No known extension present: just append the new one.
		filename = filename + newextension
	fss, ok = macfs.StandardPutFile(prompt, filename)
	if ok:
		return fss.as_pathname()
|
|
||||||
|
|
||||||
|
def lineto(pt, done_moveto):
	"""Emit an ATM fill segment to 'pt': a lineto if the path is already
	started ('done_moveto' true), otherwise a moveto. Returns 'pt' so the
	caller can track the current point."""
	x, y = pt
	if done_moveto:
		ATM.fillLineToATM((x, y))
	else:
		ATM.fillMoveToATM((x, y))
	return pt
|
|
||||||
|
def qcurveto(pt0, pt1, pt2, done_moveto):
	"""Emit a TrueType quadratic segment (pt0 on-curve, pt1 control,
	pt2 on-curve) as an ATM cubic curveto, starting the path with a
	moveto if needed. Returns pt2 (the new current point).

	The 2/3 interpolation converts the quadratic control point into the
	two cubic control points of the mathematically identical cubic.
	"""
	if not done_moveto:
		x0, y0 = pt0
		ATM.fillMoveToATM((x0, y0))
	x1a, y1a = pt0 + 0.6666666666667 * (pt1 - pt0)
	x1b, y1b = pt2 + 0.6666666666667 * (pt1 - pt2)
	x2, y2 = pt2
	ATM.fillCurveToATM((x1a, y1a), (x1b, y1b), (x2, y2))
	return pt2
|
|
196
Lib/fontTools/ttLib/xmlImport.py
Normal file
196
Lib/fontTools/ttLib/xmlImport.py
Normal file
@ -0,0 +1,196 @@
|
|||||||
|
from fontTools import ttLib
|
||||||
|
from fontTools.misc.textTools import safeEval
|
||||||
|
import types
|
||||||
|
import string
|
||||||
|
import Numeric, array
|
||||||
|
from xml.parsers.xmlproc import xmlproc
|
||||||
|
|
||||||
|
|
||||||
|
xmlerror = "xmlerror"
|
||||||
|
xml_parse_error = "XML parse error"
|
||||||
|
|
||||||
|
|
||||||
|
class UnicodeString:
	"""Minimal UTF-16-ish string type backed by a Numeric Int16 array,
	used so XML character references > 255 survive parsing. Supports
	concatenation, slicing/indexing, big-endian serialization and
	whitespace stripping."""

	def __init__(self, value):
		# Accepts another UnicodeString, an 8-bit string, or any
		# sequence of code points.
		if isinstance(value, UnicodeString):
			self.value = value.value
		else:
			if type(value) == types.StringType:
				# Since Numeric interprets char codes as *signed*,
				# we feed it through the array module.
				value = array.array("B", value)
			self.value = Numeric.array(value, Numeric.Int16)

	def __len__(self):
		return len(self.value)

	#def __hash__(self):
	#	return hash(self.value.tostring())
	#
	#def __cmp__(self, other):
	#	if not isinstance(other, UnicodeString):
	#		return 1
	#	else:
	#		return not Numeric.alltrue(
	#				Numeric.equal(self.value, other.value))

	def __add__(self, other):
		# Coerce plain strings/sequences before concatenating.
		if not isinstance(other, UnicodeString):
			other = self.__class__(other)
		return self.__class__(Numeric.concatenate((self.value, other.value)))

	def __radd__(self, other):
		if not isinstance(other, UnicodeString):
			other = self.__class__(other)
		return self.__class__(Numeric.concatenate((other.value, self.value)))

	def __getslice__(self, i, j):
		# Python 1.x/2.x slicing protocol; slices stay UnicodeStrings.
		return self.__class__(self.value[i:j])

	def __getitem__(self, i):
		# Indexing returns a length-1 UnicodeString, not an int.
		return self.__class__(self.value[i:i+1])

	def tostring(self):
		# Serialize as big-endian 16-bit units, byte-swapping on
		# little-endian hosts.
		value = self.value
		if ttLib.endian <> "big":
			value = value.byteswapped()
		return value.tostring()

	def stripped(self):
		"""Return a copy with leading and trailing XML whitespace
		(LF, CR, TAB, space) removed."""
		value = self.value
		i = 0
		for i in range(len(value)):
			if value[i] not in (0xa, 0xd, 0x9, 0x20):
				break
		value = value[i:]
		i = 0
		for i in range(len(value)-1, -1, -1):
			if value[i] not in (0xa, 0xd, 0x9, 0x20):
				break
		value = value[:i+1]
		return self.__class__(value)

	def __repr__(self):
		return "<%s %s at %x>" % (self.__class__.__name__, `self.value.tostring()`, id(self))
|
|
||||||
|
|
||||||
|
class UnicodeProcessor(xmlproc.XMLProcessor):
	"""xmlproc processor subclass whose only change is charref handling:
	references above 255 are delivered to the application as
	UnicodeString objects instead of being rejected."""

	def parse_charref(self):
		"Parses a character reference."

		# Hex (&#x...;) vs decimal (&#...;) reference.
		if self.now_at("x"):
			digs=unhex(self.get_match(xmlproc.reg_hex_digits))
		else:
			try:
				digs=string.atoi(self.get_match(xmlproc.reg_digits))
			except ValueError,e:
				self.report_error(3027)
				digs=None
		# NOTE(review): this 169 check does nothing — presumably a
		# leftover debugging hook; confirm before removing.
		if digs == 169:
			pass
		if not self.now_at(";"): self.report_error(3005,";")
		if digs==None: return

		# Outside the XML-legal 8-bit range handled by upstream xmlproc:
		if not (digs==9 or digs==10 or digs==13 or \
				(digs>=32 and digs<=255)):
			if digs>255:
				# Wide character: hand over as a UnicodeString.
				self.app.handle_data(UnicodeString([digs]),0,1)
			else:
				# hrm, I need to let some null bytes go through...
				self.app.handle_data(chr(digs),0,1)
				#self.report_error(3018,digs)
		else:
			if self.stack==[]:
				self.report_error(3028)
			self.app.handle_data(chr(digs),0,1)
||||||
|
|
||||||
|
class XMLErrorHandler(xmlproc.ErrorHandler):
	"""Error handler that raises instead of exiting the interpreter,
	so callers can catch XML parse failures."""

	def fatal(self, msg):
		"Handles a fatal error message."
		# we don't want no stinkin' sys.exit(1)
		raise xml_parse_error, msg
|
|
||||||
|
|
||||||
|
class XMLApplication(xmlproc.Application):
	"""xmlproc application that rebuilds a TTFont from its XML dump.

	Document structure: <ttFont> (depth 0) contains one element per
	table (depth 1); each table's contents (depth >= 2) are collected
	into nested (name, attrs, content) tuples and handed to the table
	object's fromXML() when the depth-2 element closes."""

	def __init__(self, ttFont, progress=None):
		self.ttFont = ttFont
		self.progress = progress      # optional progress-bar object
		self.root = None              # current depth-2 element being built
		self.content_stack = []       # parallel stack of content lists
		self.lastpos = 0              # last parser position reported to progress

	def handle_start_tag(self, name, attrs):
		if self.progress:
			# Throttle progress updates to every ~4000 bytes of input.
			pos = self.locator.pos + self.locator.block_offset
			if (pos - self.lastpos) > 4000:
				self.progress.set(pos / 100)
				self.lastpos = pos
		stack = self.locator.stack
		stacksize = len(stack)
		if not stacksize:
			# Document root: must be <ttFont>; pick up sfntVersion.
			if name <> "ttFont":
				raise xml_parse_error, "illegal root tag: %s" % name
			sfntVersion = attrs.get("sfntVersion", "\000\001\000\000")
			if len(sfntVersion) <> 4:
				# Escaped form (e.g. "\x00\x01\x00\x00"): evaluate safely.
				sfntVersion = safeEval('"' + sfntVersion + '"')
			self.ttFont.sfntVersion = sfntVersion
			self.content_stack.append([])
		elif stacksize == 1:
			# New table element: instantiate the matching table class
			# (falling back to DefaultTable) and register it on the font.
			msg = "Parsing '%s' table..." % ttLib.xmltag2tag(name)
			if self.progress:
				self.progress.setlabel(msg)
			elif self.ttFont.verbose:
				ttLib.debugmsg(msg)
			else:
				print msg
			tag = ttLib.xmltag2tag(name)
			tableclass = ttLib.getTableClass(tag)
			if tableclass is None:
				from fontTools.ttLib.tables.DefaultTable import DefaultTable
				tableclass = DefaultTable
			self.current_table = tableclass(tag)
			self.ttFont[tag] = self.current_table
			self.content_stack.append([])
		elif stacksize == 2:
			# Top-level element inside a table: start a fresh root tuple.
			self.content_stack.append([])
			self.root = (name, attrs, self.content_stack[-1])
		else:
			# Deeper nesting: append a child tuple to the current content.
			# NOTE(review): multi-argument append is a Python 1.x idiom
			# that appends the args as one tuple — confirm when porting.
			list = []
			self.content_stack[-1].append(name, attrs, list)
			self.content_stack.append(list)

	def handle_data(self, data, start, end):
		# Character data belongs to whatever element is open (ignore
		# whitespace between <ttFont> and the table elements).
		if len(self.locator.stack) > 1:
			self.content_stack[-1].append(data[start:end])

	def handle_end_tag(self, name):
		del self.content_stack[-1]
		stack = self.locator.stack
		stacksize = len(stack)
		if stacksize == 1:
			self.root = None
		elif stacksize == 2:
			# A table's top-level element closed: feed it to the table.
			self.current_table.fromXML(self.root, self.ttFont)
			self.root = None
||||||
|
|
||||||
|
class ProgressPrinter:
	"""Console stand-in for a GUI progress bar: prints the title and
	labels, ignores numeric progress updates."""

	def __init__(self, title, maxval=100):
		print title

	def set(self, val, maxval=None):
		# Numeric progress is not shown on the console.
		pass

	def increment(self, val=1):
		pass

	def setlabel(self, text):
		print text
|
|
||||||
|
|
6646
Lib/fontTools/unicode.py
Normal file
6646
Lib/fontTools/unicode.py
Normal file
File diff suppressed because it is too large
Load Diff
204
Lib/sstruct.py
Normal file
204
Lib/sstruct.py
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
"""sstruct.py -- SuperStruct
|
||||||
|
|
||||||
|
Higher level layer on top of the struct module, enabling to
|
||||||
|
bind names to struct elements. The interface is similar to
|
||||||
|
struct, except the objects passed and returned are not tuples
|
||||||
|
(or argument lists), but dictionaries or instances.
|
||||||
|
|
||||||
|
Just like struct, we use format strings to describe a data
|
||||||
|
structure, except we use one line per element. Lines are
|
||||||
|
separated by newlines or semi-colons. Each line contains
|
||||||
|
either one of the special struct characters ('@', '=', '<',
|
||||||
|
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
|
||||||
|
Repetitions, like the struct module offers them are not useful
|
||||||
|
in this context, except for fixed length strings (eg. 'myInt:5h'
|
||||||
|
is not allowed but 'myString:5s' is). The 'x' format character
|
||||||
|
(pad byte) is treated as 'special', since it is by definition
|
||||||
|
anonymous. Extra whitespace is allowed everywhere.
|
||||||
|
|
||||||
|
The sstruct module offers one feature that the "normal" struct
|
||||||
|
module doesn't: support for fixed point numbers. These are spelled
|
||||||
|
as "n.mF", where n is the number of bits before the point, and m
|
||||||
|
the number of bits after the point. Fixed point numbers get
|
||||||
|
converted to floats.
|
||||||
|
|
||||||
|
pack(format, object):
|
||||||
|
'object' is either a dictionary or an instance (or actually
|
||||||
|
anything that has a __dict__ attribute). If it is a dictionary,
|
||||||
|
its keys are used for names. If it is an instance, it's
|
||||||
|
attributes are used to grab struct elements from. Returns
|
||||||
|
a string containing the data.
|
||||||
|
|
||||||
|
unpack(format, data, object=None)
|
||||||
|
If 'object' is omitted (or None), a new dictionary will be
|
||||||
|
returned. If 'object' is a dictionary, it will be used to add
|
||||||
|
struct elements to. If it is an instance (or in fact anything
|
||||||
|
that has a __dict__ attribute), an attribute will be added for
|
||||||
|
each struct element. In the latter two cases, 'object' itself
|
||||||
|
is returned.
|
||||||
|
|
||||||
|
unpack2(format, data, object=None)
|
||||||
|
Convenience function. Same as unpack, except data may be longer
|
||||||
|
than needed. The returned value is a tuple: (object, leftoverdata).
|
||||||
|
|
||||||
|
calcsize(format)
|
||||||
|
like struct.calcsize(), but uses our own format strings:
|
||||||
|
it returns the size of the data in bytes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# XXX I would like to support pascal strings, too, but I'm not
|
||||||
|
# sure if that's wise. Would be nice if struct supported them
|
||||||
|
# "properly", but that would certainly break calcsize()...
|
||||||
|
|
||||||
|
__version__ = "1.2"
|
||||||
|
__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
|
||||||
|
|
||||||
|
import struct
|
||||||
|
import re
|
||||||
|
import types
|
||||||
|
|
||||||
|
|
||||||
|
error = "sstruct.error"
|
||||||
|
|
||||||
|
def pack(format, object):
	"""Pack 'object' into binary data per the sstruct 'format'.

	'object' is a dict or any instance with a __dict__; field values are
	looked up by the names declared in the format. Fixed-point fields
	(declared as "n.mF") are scaled from float to integer before packing.
	Returns the packed byte string.
	"""
	formatstring, names, fixes = getformat(format)
	elements = []
	if type(object) is not types.DictType:
		# Instance: read fields from its attribute dict.
		object = object.__dict__
	for name in names:
		value = object[name]
		if fixes.has_key(name):
			# fixed point conversion
			value = int(round(value*fixes[name]))
		elements.append(value)
	data = apply(struct.pack, (formatstring,) + tuple(elements))
	return data
|
|
||||||
|
def unpack(format, data, object=None):
	"""Unpack binary 'data' per the sstruct 'format' into 'object'.

	If 'object' is None a new dict is returned; if it is a dict, fields
	are stored as keys; otherwise fields become attributes on the
	instance. Fixed-point fields are scaled back to floats. Returns
	'object' (or the new dict).
	"""
	if object is None:
		object = {}
	formatstring, names, fixes = getformat(format)
	if type(object) is types.DictType:
		dict = object
	else:
		# Instance: write fields into its attribute dict.
		dict = object.__dict__
	elements = struct.unpack(formatstring, data)
	for i in range(len(names)):
		name = names[i]
		value = elements[i]
		if fixes.has_key(name):
			# fixed point conversion
			value = value / fixes[name]
		dict[name] = value
	return object
|
|
||||||
|
def unpack2(format, data, object=None):
	"""Like unpack(), but 'data' may be longer than the format needs.
	Returns a (object, leftoverdata) tuple."""
	length = calcsize(format)
	return unpack(format, data[:length], object), data[length:]
|
|
||||||
|
def calcsize(format):
	"""Return the size in bytes of the data described by *format*.

	Works like struct.calcsize(), but accepts our own format strings.
	"""
	compiled, _names, _fixes = getformat(format)
	return struct.calcsize(compiled)
|
||||||
|
|
||||||
|
|
||||||
|
# matches "name:formatchar" (whitespace is allowed)
|
||||||
|
_elementRE = re.compile(
|
||||||
|
"\s*" # whitespace
|
||||||
|
"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
|
||||||
|
"\s*:\s*" # whitespace : whitespace
|
||||||
|
"([cbBhHiIlLfd]|[0-9]+[ps]|" # formatchar...
|
||||||
|
"([0-9]+)\.([0-9]+)(F))" # ...formatchar
|
||||||
|
"\s*" # whitespace
|
||||||
|
"(#.*)?$" # [comment] + end of string
|
||||||
|
)
|
||||||
|
|
||||||
|
# matches the special struct format chars and 'x' (pad byte)
|
||||||
|
_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$")
|
||||||
|
|
||||||
|
# matches an "empty" string, possibly containing whitespace and/or a comment
|
||||||
|
_emptyRE = re.compile("\s*(#.*)?$")
|
||||||
|
|
||||||
|
_fixedpointmappings = {
|
||||||
|
8: "b",
|
||||||
|
16: "h",
|
||||||
|
32: "l"}
|
||||||
|
|
||||||
|
_formatcache = {}
|
||||||
|
|
||||||
|
def getformat(format):
	"""Parse an sstruct *format* string.

	Returns a tuple (formatstring, names, fixes) where *formatstring* is
	a plain struct format string, *names* lists the field names in
	declaration order, and *fixes* maps fixed point field names to their
	float scaling factor.  Parsed results are cached in _formatcache.

	Raises error on syntax problems in the format string.
	"""
	try:
		formatstring, names, fixes = _formatcache[format]
	except KeyError:
		# statements are separated by newlines or semicolons
		lines = re.split(r"[\n;]", format)
		formatstring = ""
		names = []
		fixes = {}
		for line in lines:
			if _emptyRE.match(line):
				continue
			m = _extraRE.match(line)
			if m:
				formatchar = m.group(1)
				# byte order / alignment chars must come before any field;
				# only the pad byte 'x' may appear later
				if formatchar != 'x' and formatstring:
					raise error("a special format char must be first")
			else:
				m = _elementRE.match(line)
				if not m:
					raise error("syntax error in format: '%s'" % line)
				name = m.group(1)
				names.append(name)
				formatchar = m.group(2)
				if m.group(3):
					# fixed point field, declared as "<before>.<after>F"
					before = int(m.group(3))
					after = int(m.group(4))
					bits = before + after
					if bits not in (8, 16, 32):
						raise error("fixed point must be 8, 16 or 32 bits long")
					formatchar = _fixedpointmappings[bits]
					assert m.group(5) == "F"
					# multiplying by (1 << after) maps the float to its raw int
					fixes[name] = float(1 << after)
			formatstring = formatstring + formatchar
		_formatcache[format] = formatstring, names, fixes
	return formatstring, names, fixes
|
||||||
|
|
||||||
|
def _test():
	"""Smoke test / usage demo; run when the module is executed directly."""
	format = """
		# comments are allowed
		> # big endian (see documentation for struct)
		# empty lines are allowed:

		ashort: h
		along: l
		abyte: b # a byte
		achar: c
		astr: 5s
		afloat: f; adouble: d # multiple "statements" are allowed
		afixed: 16.16F
	"""

	print('size:', calcsize(format))

	class foo:
		pass

	i = foo()

	i.ashort = 0x7fff
	i.along = 0x7fffffff
	i.abyte = 0x7f
	i.achar = b"a"     # struct "c" requires a length-1 bytes object
	i.astr = b"12345"  # struct "5s" requires bytes
	i.afloat = 0.5
	i.adouble = 0.5
	i.afixed = 1.5

	data = pack(format, i)
	print('data:', repr(data))
	print(unpack(format, data))
	i2 = foo()
	unpack(format, data, i2)
	print(vars(i2))
|
172
Lib/xmlWriter.py
Normal file
172
Lib/xmlWriter.py
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
"""xmlWriter.py -- Simple XML authoring class"""

# module metadata
__author__ = "jvr"
__version__ = "0.9"

import string
import struct

# whitespace written per level of indentation
INDENT = "  "
|
||||||
|
|
||||||
|
class XMLWriter:
	"""Simple XML authoring helper.

	Writes an XML declaration on construction, then offers tag, text,
	comment and hex-dump writing methods with automatic indentation
	tracking.  Open tags are kept on a stack so endtag() can verify
	proper nesting.
	"""

	def __init__(self, file, dtd=None, indentwhite=INDENT):
		"""*file* is either a path name (opened for writing) or a writable
		file object.  *indentwhite* is the whitespace unit written per
		indentation level."""
		if isinstance(file, str):
			self.file = open(file, "w")
		else:
			# assume writable file object
			self.file = file
		self.dtd = dtd
		self.indentwhite = indentwhite
		self.indentlevel = 0
		self.stack = []       # names of currently open tags
		self.needindent = 1   # true: emit indentation before the next raw write
		self.writeraw("<?xml version='1.0'?>")
		self.newline()
		if self.dtd:
			# DOCTYPE???  (DTD output was never implemented)
			self.newline()

	def close(self):
		"""Close the underlying file object."""
		self.file.close()

	def write(self, data):
		"""Write character data, escaping '&' and '<'."""
		self.writeraw(escape(data))

	def write_noindent(self, data):
		"""Write escaped character data without emitting any indentation."""
		self.file.write(escape(data))

	def write8bit(self, data):
		"""Write 8-bit data; non-ASCII chars become numeric references."""
		self.writeraw(escape8bit(data))

	def write16bit(self, data):
		"""Write 16-bit data; non-ASCII code points become numeric references."""
		self.writeraw(escape16bit(data))

	def writeraw(self, data):
		"""Write *data* verbatim, emitting pending indentation first."""
		if self.needindent:
			self.file.write(self.indentlevel * self.indentwhite)
			self.needindent = 0
		self.file.write(data)

	def newline(self):
		"""End the current line; the next write will be indented."""
		self.file.write("\n")
		self.needindent = 1

	def comment(self, data):
		"""Write an XML comment; continuation lines are aligned under the text."""
		data = escape(data)
		lines = data.split("\n")
		self.writeraw("<!-- " + lines[0])
		for line in lines[1:]:
			self.newline()
			self.writeraw("     " + line)  # align under "<!-- "
		self.writeraw(" -->")

	def simpletag(self, _TAG_, *args, **kwargs):
		"""Write a self-closing tag: <tag attr="value"/>."""
		attrdata = self.stringifyattrs(*args, **kwargs)
		self.writeraw("<%s%s/>" % (_TAG_, attrdata))

	def begintag(self, _TAG_, *args, **kwargs):
		"""Open a tag, push it on the stack and increase the indent."""
		attrdata = self.stringifyattrs(*args, **kwargs)
		self.writeraw("<%s%s>" % (_TAG_, attrdata))
		self.stack.append(_TAG_)
		self.indent()

	def endtag(self, _TAG_):
		"""Close the most recently opened tag, which must be *_TAG_*."""
		assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
		del self.stack[-1]
		self.dedent()
		self.writeraw("</%s>" % _TAG_)

	def dumphex(self, data):
		"""Write *data* as a hex dump: 16 bytes per line, space-separated
		groups of 8 hex digits (4 bytes)."""
		linelength = 16
		hexlinelength = linelength * 2
		chunksize = 8
		for i in range(0, len(data), linelength):
			hexline = hexStr(data[i:i + linelength])
			line = ""
			white = ""
			for j in range(0, hexlinelength, chunksize):
				line = line + white + hexline[j:j + chunksize]
				white = " "
			self.writeraw(line)
			self.newline()

	def indent(self):
		"""Increase the indentation level by one."""
		self.indentlevel = self.indentlevel + 1

	def dedent(self):
		"""Decrease the indentation level by one (must be > 0)."""
		assert self.indentlevel > 0
		self.indentlevel = self.indentlevel - 1

	def stringifyattrs(self, *args, **kwargs):
		"""Build the ' attr="value"' text for a tag.

		Attributes may be given either as keyword arguments (emitted in
		sorted name order for deterministic output) or as one positional
		sequence of (attr, value) pairs (order preserved).  Values are
		str()'d and attribute-escaped.
		"""
		if kwargs:
			assert not args
			attributes = sorted(kwargs.items())
		elif args:
			assert len(args) == 1
			attributes = args[0]
		else:
			return ""
		data = ""
		for attr, value in attributes:
			data = data + ' %s="%s"' % (attr, escapeattr(str(value)))
		return data
|
||||||
|
|
||||||
|
|
||||||
|
def escape(data):
	"""Escape '&' and '<' for use as XML character data.

	'&' is replaced first so the inserted entities are not re-escaped.
	"""
	data = data.replace("&", "&amp;")
	data = data.replace("<", "&lt;")
	return data
|
||||||
|
|
||||||
|
def escapeattr(data):
	"""Escape '&', '<' and '"' for use inside a double-quoted XML attribute.

	'&' is replaced first so the inserted entities are not re-escaped.
	"""
	data = data.replace("&", "&amp;")
	data = data.replace("<", "&lt;")
	data = data.replace('"', "&quot;")
	return data
|
||||||
|
|
||||||
|
def escape8bit(data):
	"""Escape a string of 8-bit characters for XML.

	'&' and '<' become entities; any character outside the printable
	ASCII range 32-127 becomes a numeric character reference.
	"""
	def escapechar(c):
		n = ord(c)
		if c in "<&":
			if c == "&":
				return "&amp;"
			else:
				return "&lt;"
		elif 32 <= n <= 127:
			return c
		else:
			return "&#" + str(n) + ";"
	return "".join(map(escapechar, data))
|
||||||
|
|
||||||
|
# True when the native byte order is little-endian (native struct.pack lays
# out the low byte first).  Must compare against bytes, not str, on Python 3.
needswap = struct.pack("h", 1) == b"\x01\x00"
|
||||||
|
|
||||||
|
def escape16bit(data):
	"""Escape a buffer of 16-bit characters (two bytes each) for XML.

	'&' and '<' become entities, printable ASCII code points are emitted
	literally, everything else becomes a numeric character reference.

	NOTE(review): the buffer is byteswapped on little-endian machines
	before interpretation, which suggests the input is big-endian --
	confirm against the callers.
	"""
	import array
	a = array.array("H")
	a.frombytes(data)  # fromstring() was removed in Python 3.9
	if needswap:
		a.byteswap()
	def escapenum(n, amp=ord("&"), lt=ord("<")):
		if n == amp:
			return "&amp;"
		elif n == lt:
			return "&lt;"
		elif 32 <= n <= 127:
			return chr(n)
		else:
			return "&#" + str(n) + ";"
	return "".join(map(escapenum, a))
|
||||||
|
|
||||||
|
|
||||||
|
def hexStr(s):
	"""Return the lowercase hex dump of *s*, two hex digits per byte.

	Accepts a byte string (iterates as ints) or a str of 8-bit
	characters (iterates as chars), so it works on both Python 2-style
	and Python 3-style binary data.
	"""
	return "".join(
		"%02x" % (c if isinstance(c, int) else ord(c)) for c in s
	)
|
||||||
|
|
Loading…
x
Reference in New Issue
Block a user