Merge pull request #1326 from BoboTiG/fix-resource-leak

Fix several "ResourceWarning: unclosed file" warnings and some related improvements
Cosimo Lupo 2018-11-16 23:00:24 +00:00 committed by GitHub
commit f99afc70d3
8 changed files with 80 additions and 89 deletions
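
Every hunk below applies the same idiom: file handles that were opened and never closed, or closed only on the happy path, are rewrapped in `with` blocks so they are closed deterministically even when an exception is raised. A minimal sketch of the before/after shape (file name hypothetical, not taken from the diff):

	# Before: close() is skipped if read() raises, and CPython emits
	# "ResourceWarning: unclosed file" when the handle is garbage-collected.
	f = open("glyphs.txt", encoding="utf-8")
	text = f.read()
	f.close()

	# After: the context manager's __exit__ closes the handle on every exit path.
	with open("glyphs.txt", encoding="utf-8") as f:
		text = f.read()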

View File

@@ -1174,7 +1174,8 @@ def main(args=None, font=None):
 	del args[0]
 	for f in args:
 		log.debug("Processing %s", f)
-		table = build(open(f, 'rt', encoding="utf-8"), font, tableTag=tableTag)
+		with open(f, 'rt', encoding="utf-8") as f:
+			table = build(f, font, tableTag=tableTag)
 		blob = table.compile(font) # Make sure it compiles
 		decompiled = table.__class__()
 		decompiled.decompile(blob, font) # Make sure it decompiles!

View File

@@ -3257,7 +3257,8 @@ def main(args=None):
 			text += g[7:]
 			continue
 		if g.startswith('--text-file='):
-			text += open(g[12:], encoding='utf-8').read().replace('\n', '')
+			with open(g[12:], encoding='utf-8') as f:
+				text += f.read().replace('\n', '')
 			continue
 		if g.startswith('--unicodes='):
 			if g[11:] == '*':
@@ -3266,15 +3267,17 @@ def main(args=None):
 				unicodes.extend(parse_unicodes(g[11:]))
 			continue
 		if g.startswith('--unicodes-file='):
-			for line in open(g[16:]).readlines():
-				unicodes.extend(parse_unicodes(line.split('#')[0]))
+			with open(g[16:]) as f:
+				for line in f.readlines():
+					unicodes.extend(parse_unicodes(line.split('#')[0]))
 			continue
 		if g.startswith('--gids='):
 			gids.extend(parse_gids(g[7:]))
 			continue
 		if g.startswith('--gids-file='):
-			for line in open(g[12:]).readlines():
-				gids.extend(parse_gids(line.split('#')[0]))
+			with open(g[12:]) as f:
+				for line in f.readlines():
+					gids.extend(parse_gids(line.split('#')[0]))
 			continue
 		if g.startswith('--glyphs='):
 			if g[9:] == '*':
@@ -3283,8 +3286,9 @@ def main(args=None):
 				glyphs.extend(parse_glyphs(g[9:]))
 			continue
 		if g.startswith('--glyphs-file='):
-			for line in open(g[14:]).readlines():
-				glyphs.extend(parse_glyphs(line.split('#')[0]))
+			with open(g[14:]) as f:
+				for line in f.readlines():
+					glyphs.extend(parse_glyphs(line.split('#')[0]))
 			continue
 		glyphs.append(g)
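
A side note on the `--*-file=` hunks above (an alternative, not part of this PR): `f.readlines()` materializes the whole file as a list before looping, whereas iterating the handle directly streams one line at a time and reads the same way. A sketch, with `path` standing in for the parsed argument such as `g[16:]`:

	with open(path) as f:
		for line in f:	# lazily yields lines instead of building a list
			unicodes.extend(parse_unicodes(line.split('#')[0]))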

View File

@@ -164,9 +164,8 @@ def readLWFN(path, onlyHeader=False):
 			elif code in [3, 5]:
 				break
 			elif code == 4:
-				f = open(path, "rb")
-				data.append(f.read())
-				f.close()
+				with open(path, "rb") as f:
+					data.append(f.read())
 			elif code == 0:
 				pass # comment, ignore
 			else:
@@ -179,35 +178,32 @@ def readLWFN(path, onlyHeader=False):
 
 def readPFB(path, onlyHeader=False):
 	"""reads a PFB font file, returns raw data"""
-	f = open(path, "rb")
 	data = []
-	while True:
-		if f.read(1) != bytechr(128):
-			raise T1Error('corrupt PFB file')
-		code = byteord(f.read(1))
-		if code in [1, 2]:
-			chunklen = stringToLong(f.read(4))
-			chunk = f.read(chunklen)
-			assert len(chunk) == chunklen
-			data.append(chunk)
-		elif code == 3:
-			break
-		else:
-			raise T1Error('bad chunk code: ' + repr(code))
-		if onlyHeader:
-			break
-	f.close()
+	with open(path, "rb") as f:
+		while True:
+			if f.read(1) != bytechr(128):
+				raise T1Error('corrupt PFB file')
+			code = byteord(f.read(1))
+			if code in [1, 2]:
+				chunklen = stringToLong(f.read(4))
+				chunk = f.read(chunklen)
+				assert len(chunk) == chunklen
+				data.append(chunk)
+			elif code == 3:
+				break
+			else:
+				raise T1Error('bad chunk code: ' + repr(code))
+			if onlyHeader:
+				break
 	data = bytesjoin(data)
 	assertType1(data)
 	return data
 
 def readOther(path):
 	"""reads any (font) file, returns raw data"""
-	f = open(path, "rb")
-	data = f.read()
-	f.close()
+	with open(path, "rb") as f:
+		data = f.read()
 	assertType1(data)
 	chunks = findEncryptedChunks(data)
 	data = []
 	for isEncrypted, chunk in chunks:
@@ -244,8 +240,7 @@ def writeLWFN(path, data):
 
 def writePFB(path, data):
 	chunks = findEncryptedChunks(data)
-	f = open(path, "wb")
-	try:
+	with open(path, "wb") as f:
 		for isEncrypted, chunk in chunks:
 			if isEncrypted:
 				code = 2
@@ -255,13 +250,10 @@ def writePFB(path, data):
 			f.write(longToString(len(chunk)))
 			f.write(chunk)
 		f.write(bytechr(128) + bytechr(3))
-	finally:
-		f.close()
 
 def writeOther(path, data, dohex=False):
 	chunks = findEncryptedChunks(data)
-	f = open(path, "wb")
-	try:
+	with open(path, "wb") as f:
 		hexlinelen = HEXLINELENGTH // 2
 		for isEncrypted, chunk in chunks:
 			if isEncrypted:
@@ -275,8 +267,6 @@ def writeOther(path, data, dohex=False):
 				chunk = chunk[hexlinelen:]
 			else:
 				f.write(chunk)
-	finally:
-		f.close()
 
 # decryption tools
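
The writePFB and writeOther hunks show the older `try`/`finally` idiom collapsing into `with`: for file objects the two shapes give the same guarantee, since the context manager calls `close()` on both normal and exceptional exit. A schematic comparison (illustrative only, `payload` hypothetical):

	f = open(path, "wb")	# old shape
	try:
		f.write(payload)
	finally:
		f.close()

	with open(path, "wb") as f:	# new shape: same guarantee, less ceremony
		f.write(payload)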

View File

@@ -293,11 +293,11 @@ def ttCompile(input, output, options):
 def guessFileType(fileName):
 	base, ext = os.path.splitext(fileName)
 	try:
-		f = open(fileName, "rb")
+		with open(fileName, "rb") as f:
+			header = f.read(256)
 	except IOError:
 		return None
-	header = f.read(256)
-	f.close()
 	if header.startswith(b'\xef\xbb\xbf<?xml'):
 		header = header.lstrip(b'\xef\xbb\xbf')
 	cr, tp = getMacCreatorAndType(fileName)
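
One behavioral nuance in this hunk: the read moved inside the `try`, so an `IOError` raised by `f.read(256)` itself, not just by `open`, now also yields `None`. Schematically:

	try:
		with open(fileName, "rb") as f:
			header = f.read(256)	# an IOError here is now caught too
	except IOError:
		return None

For guessing a file type this is arguably the desired behavior: an unreadable file has no guessable type.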

View File

@@ -18,8 +18,11 @@ class _UnicodeCustom(object):
 	def __init__(self, f):
 		if isinstance(f, basestring):
-			f = open(f)
-		self.codes = _makeunicodes(f)
+			with open(f) as fd:
+				codes = _makeunicodes(fd)
+		else:
+			codes = _makeunicodes(f)
+		self.codes = codes
 
 	def __getitem__(self, charCode):
 		try:
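
The rewritten `__init__` keeps the class's dual interface while only closing what it opened: a string is treated as a path and the file is opened and closed internally, whereas a caller-supplied file object stays open for the caller to manage. A hedged usage sketch (file name hypothetical):

	custom = _UnicodeCustom("UnicodeData.txt")	# path: opened and closed here

	with open("UnicodeData.txt") as f:
		custom = _UnicodeCustom(f)	# file object: lifetime stays with the caller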

View File

@@ -30,9 +30,9 @@ modules.sort()
 tables.sort()
 
-file = open(os.path.join(tablesDir, "__init__.py"), "w")
-file.write('''
+with open(os.path.join(tablesDir, "__init__.py"), "w") as file:
+	file.write('''
 from __future__ import print_function, division, absolute_import
 from fontTools.misc.py23 import *
@@ -45,21 +45,20 @@ def _moduleFinderHint():
 	"""
 ''')
-for module in modules:
-	file.write("\tfrom . import %s\n" % module)
-file.write('''
+	for module in modules:
+		file.write("\tfrom . import %s\n" % module)
+	file.write('''
 if __name__ == "__main__":
 	import doctest, sys
 	sys.exit(doctest.testmod().failed)
 ''')
-file.close()
 
 begin = ".. begin table list\n.. code::\n"
 end = ".. end table list"
-doc = open(docFile).read()
+with open(docFile) as f:
+	doc = f.read()
 beginPos = doc.find(begin)
 assert beginPos > 0
 beginPos = beginPos + len(begin) + 1
@@ -70,4 +69,5 @@ blockquote = "\n".join(" "*4 + line for line in lines) + "\n"
 doc = doc[:beginPos] + blockquote + doc[endPos:]
-open(docFile, "w").write(doc)
+with open(docFile, "w") as f:
+	f.write(doc)

View File

@@ -74,23 +74,22 @@ def main(args):
 	if not files:
 		usage()
-	report = open("report.txt", "a+")
-	options = ttx.Options(rawOptions, len(files))
-	for ttFile in files:
-		try:
-			roundTrip(ttFile, options, report)
-		except KeyboardInterrupt:
-			print("(Cancelled)")
-			break
-		except:
-			print("*** round tripping aborted ***")
-			traceback.print_exc()
-			report.write("=============================================================\n")
-			report.write(" An exception occurred while round tripping")
-			report.write(" \"%s\"\n" % ttFile)
-			traceback.print_exc(file=report)
-			report.write("-------------------------------------------------------------\n")
-	report.close()
+	with open("report.txt", "a+") as report:
+		options = ttx.Options(rawOptions, len(files))
+		for ttFile in files:
+			try:
+				roundTrip(ttFile, options, report)
+			except KeyboardInterrupt:
+				print("(Cancelled)")
+				break
+			except:
+				print("*** round tripping aborted ***")
+				traceback.print_exc()
+				report.write("=============================================================\n")
+				report.write(" An exception occurred while round tripping")
+				report.write(" \"%s\"\n" % ttFile)
+				traceback.print_exc(file=report)
+				report.write("-------------------------------------------------------------\n")
 
 main(sys.argv[1:])

View File

@@ -237,12 +237,10 @@ def test_unicodes(tmpdir):
     new.read(testDocPath)
     new.write(testDocPath2)
     # compare the file contents
-    f1 = open(testDocPath, 'r', encoding='utf-8')
-    t1 = f1.read()
-    f1.close()
-    f2 = open(testDocPath2, 'r', encoding='utf-8')
-    t2 = f2.read()
-    f2.close()
+    with open(testDocPath, 'r', encoding='utf-8') as f1:
+        t1 = f1.read()
+    with open(testDocPath2, 'r', encoding='utf-8') as f2:
+        t2 = f2.read()
     assert t1 == t2
     # check the unicode values read from the document
    assert new.instances[0].glyphs['arrow']['unicodes'] == [100,200,300]
@@ -337,12 +335,10 @@ def test_localisedNames(tmpdir):
     new = DesignSpaceDocument()
     new.read(testDocPath)
     new.write(testDocPath2)
-    f1 = open(testDocPath, 'r', encoding='utf-8')
-    t1 = f1.read()
-    f1.close()
-    f2 = open(testDocPath2, 'r', encoding='utf-8')
-    t2 = f2.read()
-    f2.close()
+    with open(testDocPath, 'r', encoding='utf-8') as f1:
+        t1 = f1.read()
+    with open(testDocPath2, 'r', encoding='utf-8') as f2:
+        t2 = f2.read()
     assert t1 == t2
@@ -761,14 +757,12 @@ def _addUnwrappedCondition(path):
     # only for testing, so we can make an invalid designspace file
     # older designspace files may have conditions that are not wrapped in a conditionset
    # These can be read into a new conditionset.
-    f = open(path, 'r', encoding='utf-8')
-    d = f.read()
+    with open(path, 'r', encoding='utf-8') as f:
+        d = f.read()
     print(d)
-    f.close()
     d = d.replace('<rule name="named.rule.1">', '<rule name="named.rule.1">\n\t<condition maximum="22" minimum="33" name="axisName_a" />')
-    f = open(path, 'w', encoding='utf-8')
-    f.write(d)
-    f.close()
+    with open(path, 'w', encoding='utf-8') as f:
+        f.write(d)
 
 def test_documentLib(tmpdir):
     # roundtrip test of the document lib with some nested data
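
Finally, a note on catching regressions like these: `ResourceWarning` is ignored by CPython's default warning filters, so a reintroduced leak stays silent. One way to surface leaks is to escalate the warning to an error while testing; a sketch, not part of this PR:

	# From the command line:
	#   python -W error::ResourceWarning -m pytest
	# Or programmatically, before the code under test runs:
	import warnings
	warnings.simplefilter("error", ResourceWarning)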