Merge pull request #1326 from BoboTiG/fix-resource-leak
Fix several ResourceWarning: unclosed file, and some related improvements
This commit is contained in:
commit
f99afc70d3
@ -1174,7 +1174,8 @@ def main(args=None, font=None):
|
||||
del args[0]
|
||||
for f in args:
|
||||
log.debug("Processing %s", f)
|
||||
table = build(open(f, 'rt', encoding="utf-8"), font, tableTag=tableTag)
|
||||
with open(f, 'rt', encoding="utf-8") as f:
|
||||
table = build(f, font, tableTag=tableTag)
|
||||
blob = table.compile(font) # Make sure it compiles
|
||||
decompiled = table.__class__()
|
||||
decompiled.decompile(blob, font) # Make sure it decompiles!
|
||||
|
@ -3257,7 +3257,8 @@ def main(args=None):
|
||||
text += g[7:]
|
||||
continue
|
||||
if g.startswith('--text-file='):
|
||||
text += open(g[12:], encoding='utf-8').read().replace('\n', '')
|
||||
with open(g[12:], encoding='utf-8') as f:
|
||||
text += f.read().replace('\n', '')
|
||||
continue
|
||||
if g.startswith('--unicodes='):
|
||||
if g[11:] == '*':
|
||||
@ -3266,15 +3267,17 @@ def main(args=None):
|
||||
unicodes.extend(parse_unicodes(g[11:]))
|
||||
continue
|
||||
if g.startswith('--unicodes-file='):
|
||||
for line in open(g[16:]).readlines():
|
||||
unicodes.extend(parse_unicodes(line.split('#')[0]))
|
||||
with open(g[16:]) as f:
|
||||
for line in f.readlines():
|
||||
unicodes.extend(parse_unicodes(line.split('#')[0]))
|
||||
continue
|
||||
if g.startswith('--gids='):
|
||||
gids.extend(parse_gids(g[7:]))
|
||||
continue
|
||||
if g.startswith('--gids-file='):
|
||||
for line in open(g[12:]).readlines():
|
||||
gids.extend(parse_gids(line.split('#')[0]))
|
||||
with open(g[12:]) as f:
|
||||
for line in f.readlines():
|
||||
gids.extend(parse_gids(line.split('#')[0]))
|
||||
continue
|
||||
if g.startswith('--glyphs='):
|
||||
if g[9:] == '*':
|
||||
@ -3283,8 +3286,9 @@ def main(args=None):
|
||||
glyphs.extend(parse_glyphs(g[9:]))
|
||||
continue
|
||||
if g.startswith('--glyphs-file='):
|
||||
for line in open(g[14:]).readlines():
|
||||
glyphs.extend(parse_glyphs(line.split('#')[0]))
|
||||
with open(g[14:]) as f:
|
||||
for line in f.readlines():
|
||||
glyphs.extend(parse_glyphs(line.split('#')[0]))
|
||||
continue
|
||||
glyphs.append(g)
|
||||
|
||||
|
@ -164,9 +164,8 @@ def readLWFN(path, onlyHeader=False):
|
||||
elif code in [3, 5]:
|
||||
break
|
||||
elif code == 4:
|
||||
f = open(path, "rb")
|
||||
data.append(f.read())
|
||||
f.close()
|
||||
with open(path, "rb") as f:
|
||||
data.append(f.read())
|
||||
elif code == 0:
|
||||
pass # comment, ignore
|
||||
else:
|
||||
@ -179,35 +178,32 @@ def readLWFN(path, onlyHeader=False):
|
||||
|
||||
def readPFB(path, onlyHeader=False):
|
||||
"""reads a PFB font file, returns raw data"""
|
||||
f = open(path, "rb")
|
||||
data = []
|
||||
while True:
|
||||
if f.read(1) != bytechr(128):
|
||||
raise T1Error('corrupt PFB file')
|
||||
code = byteord(f.read(1))
|
||||
if code in [1, 2]:
|
||||
chunklen = stringToLong(f.read(4))
|
||||
chunk = f.read(chunklen)
|
||||
assert len(chunk) == chunklen
|
||||
data.append(chunk)
|
||||
elif code == 3:
|
||||
break
|
||||
else:
|
||||
raise T1Error('bad chunk code: ' + repr(code))
|
||||
if onlyHeader:
|
||||
break
|
||||
f.close()
|
||||
with open(path, "rb") as f:
|
||||
while True:
|
||||
if f.read(1) != bytechr(128):
|
||||
raise T1Error('corrupt PFB file')
|
||||
code = byteord(f.read(1))
|
||||
if code in [1, 2]:
|
||||
chunklen = stringToLong(f.read(4))
|
||||
chunk = f.read(chunklen)
|
||||
assert len(chunk) == chunklen
|
||||
data.append(chunk)
|
||||
elif code == 3:
|
||||
break
|
||||
else:
|
||||
raise T1Error('bad chunk code: ' + repr(code))
|
||||
if onlyHeader:
|
||||
break
|
||||
data = bytesjoin(data)
|
||||
assertType1(data)
|
||||
return data
|
||||
|
||||
def readOther(path):
|
||||
"""reads any (font) file, returns raw data"""
|
||||
f = open(path, "rb")
|
||||
data = f.read()
|
||||
f.close()
|
||||
with open(path, "rb") as f:
|
||||
data = f.read()
|
||||
assertType1(data)
|
||||
|
||||
chunks = findEncryptedChunks(data)
|
||||
data = []
|
||||
for isEncrypted, chunk in chunks:
|
||||
@ -244,8 +240,7 @@ def writeLWFN(path, data):
|
||||
|
||||
def writePFB(path, data):
|
||||
chunks = findEncryptedChunks(data)
|
||||
f = open(path, "wb")
|
||||
try:
|
||||
with open(path, "wb") as f:
|
||||
for isEncrypted, chunk in chunks:
|
||||
if isEncrypted:
|
||||
code = 2
|
||||
@ -255,13 +250,10 @@ def writePFB(path, data):
|
||||
f.write(longToString(len(chunk)))
|
||||
f.write(chunk)
|
||||
f.write(bytechr(128) + bytechr(3))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def writeOther(path, data, dohex=False):
|
||||
chunks = findEncryptedChunks(data)
|
||||
f = open(path, "wb")
|
||||
try:
|
||||
with open(path, "wb") as f:
|
||||
hexlinelen = HEXLINELENGTH // 2
|
||||
for isEncrypted, chunk in chunks:
|
||||
if isEncrypted:
|
||||
@ -275,8 +267,6 @@ def writeOther(path, data, dohex=False):
|
||||
chunk = chunk[hexlinelen:]
|
||||
else:
|
||||
f.write(chunk)
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
|
||||
# decryption tools
|
||||
|
@ -293,11 +293,11 @@ def ttCompile(input, output, options):
|
||||
def guessFileType(fileName):
|
||||
base, ext = os.path.splitext(fileName)
|
||||
try:
|
||||
f = open(fileName, "rb")
|
||||
with open(fileName, "rb") as f:
|
||||
header = f.read(256)
|
||||
except IOError:
|
||||
return None
|
||||
header = f.read(256)
|
||||
f.close()
|
||||
|
||||
if header.startswith(b'\xef\xbb\xbf<?xml'):
|
||||
header = header.lstrip(b'\xef\xbb\xbf')
|
||||
cr, tp = getMacCreatorAndType(fileName)
|
||||
|
@ -18,8 +18,11 @@ class _UnicodeCustom(object):
|
||||
|
||||
def __init__(self, f):
|
||||
if isinstance(f, basestring):
|
||||
f = open(f)
|
||||
self.codes = _makeunicodes(f)
|
||||
with open(f) as fd:
|
||||
codes = _makeunicodes(fd)
|
||||
else:
|
||||
codes = _makeunicodes(f)
|
||||
self.codes = codes
|
||||
|
||||
def __getitem__(self, charCode):
|
||||
try:
|
||||
|
@ -30,9 +30,9 @@ modules.sort()
|
||||
tables.sort()
|
||||
|
||||
|
||||
file = open(os.path.join(tablesDir, "__init__.py"), "w")
|
||||
with open(os.path.join(tablesDir, "__init__.py"), "w") as file:
|
||||
|
||||
file.write('''
|
||||
file.write('''
|
||||
from __future__ import print_function, division, absolute_import
|
||||
from fontTools.misc.py23 import *
|
||||
|
||||
@ -45,21 +45,20 @@ def _moduleFinderHint():
|
||||
"""
|
||||
''')
|
||||
|
||||
for module in modules:
|
||||
file.write("\tfrom . import %s\n" % module)
|
||||
for module in modules:
|
||||
file.write("\tfrom . import %s\n" % module)
|
||||
|
||||
file.write('''
|
||||
file.write('''
|
||||
if __name__ == "__main__":
|
||||
import doctest, sys
|
||||
sys.exit(doctest.testmod().failed)
|
||||
''')
|
||||
|
||||
file.close()
|
||||
|
||||
|
||||
begin = ".. begin table list\n.. code::\n"
|
||||
end = ".. end table list"
|
||||
doc = open(docFile).read()
|
||||
with open(docFile) as f:
|
||||
doc = f.read()
|
||||
beginPos = doc.find(begin)
|
||||
assert beginPos > 0
|
||||
beginPos = beginPos + len(begin) + 1
|
||||
@ -70,4 +69,5 @@ blockquote = "\n".join(" "*4 + line for line in lines) + "\n"
|
||||
|
||||
doc = doc[:beginPos] + blockquote + doc[endPos:]
|
||||
|
||||
open(docFile, "w").write(doc)
|
||||
with open(docFile, "w") as f:
|
||||
f.write(doc)
|
||||
|
@ -74,23 +74,22 @@ def main(args):
|
||||
if not files:
|
||||
usage()
|
||||
|
||||
report = open("report.txt", "a+")
|
||||
options = ttx.Options(rawOptions, len(files))
|
||||
for ttFile in files:
|
||||
try:
|
||||
roundTrip(ttFile, options, report)
|
||||
except KeyboardInterrupt:
|
||||
print("(Cancelled)")
|
||||
break
|
||||
except:
|
||||
print("*** round tripping aborted ***")
|
||||
traceback.print_exc()
|
||||
report.write("=============================================================\n")
|
||||
report.write(" An exception occurred while round tripping")
|
||||
report.write(" \"%s\"\n" % ttFile)
|
||||
traceback.print_exc(file=report)
|
||||
report.write("-------------------------------------------------------------\n")
|
||||
report.close()
|
||||
with open("report.txt", "a+") as report:
|
||||
options = ttx.Options(rawOptions, len(files))
|
||||
for ttFile in files:
|
||||
try:
|
||||
roundTrip(ttFile, options, report)
|
||||
except KeyboardInterrupt:
|
||||
print("(Cancelled)")
|
||||
break
|
||||
except:
|
||||
print("*** round tripping aborted ***")
|
||||
traceback.print_exc()
|
||||
report.write("=============================================================\n")
|
||||
report.write(" An exception occurred while round tripping")
|
||||
report.write(" \"%s\"\n" % ttFile)
|
||||
traceback.print_exc(file=report)
|
||||
report.write("-------------------------------------------------------------\n")
|
||||
|
||||
|
||||
main(sys.argv[1:])
|
||||
|
@ -237,12 +237,10 @@ def test_unicodes(tmpdir):
|
||||
new.read(testDocPath)
|
||||
new.write(testDocPath2)
|
||||
# compare the file contents
|
||||
f1 = open(testDocPath, 'r', encoding='utf-8')
|
||||
t1 = f1.read()
|
||||
f1.close()
|
||||
f2 = open(testDocPath2, 'r', encoding='utf-8')
|
||||
t2 = f2.read()
|
||||
f2.close()
|
||||
with open(testDocPath, 'r', encoding='utf-8') as f1:
|
||||
t1 = f1.read()
|
||||
with open(testDocPath2, 'r', encoding='utf-8') as f2:
|
||||
t2 = f2.read()
|
||||
assert t1 == t2
|
||||
# check the unicode values read from the document
|
||||
assert new.instances[0].glyphs['arrow']['unicodes'] == [100,200,300]
|
||||
@ -337,12 +335,10 @@ def test_localisedNames(tmpdir):
|
||||
new = DesignSpaceDocument()
|
||||
new.read(testDocPath)
|
||||
new.write(testDocPath2)
|
||||
f1 = open(testDocPath, 'r', encoding='utf-8')
|
||||
t1 = f1.read()
|
||||
f1.close()
|
||||
f2 = open(testDocPath2, 'r', encoding='utf-8')
|
||||
t2 = f2.read()
|
||||
f2.close()
|
||||
with open(testDocPath, 'r', encoding='utf-8') as f1:
|
||||
t1 = f1.read()
|
||||
with open(testDocPath2, 'r', encoding='utf-8') as f2:
|
||||
t2 = f2.read()
|
||||
assert t1 == t2
|
||||
|
||||
|
||||
@ -761,14 +757,12 @@ def _addUnwrappedCondition(path):
|
||||
# only for testing, so we can make an invalid designspace file
|
||||
# older designspace files may have conditions that are not wrapped in a conditionset
|
||||
# These can be read into a new conditionset.
|
||||
f = open(path, 'r', encoding='utf-8')
|
||||
d = f.read()
|
||||
with open(path, 'r', encoding='utf-8') as f:
|
||||
d = f.read()
|
||||
print(d)
|
||||
f.close()
|
||||
d = d.replace('<rule name="named.rule.1">', '<rule name="named.rule.1">\n\t<condition maximum="22" minimum="33" name="axisName_a" />')
|
||||
f = open(path, 'w', encoding='utf-8')
|
||||
f.write(d)
|
||||
f.close()
|
||||
with open(path, 'w', encoding='utf-8') as f:
|
||||
f.write(d)
|
||||
|
||||
def test_documentLib(tmpdir):
|
||||
# roundtrip test of the document lib with some nested data
|
||||
|
Loading…
x
Reference in New Issue
Block a user