author    Mickaël Schoentgen <contact@tiger-222.fr>  2019-01-09 15:25:58 -0800
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2019-01-09 15:36:53 -0800
commit    71c6e243731f238333259407f477e582ac3d978b
tree      9f3f3969024146ad283d529884fc49c0296b0170
parent    a1180d8e868eca4724ad43af370c751fa39fbe65
Fix several ResourceWarning: unclosed file (#15746)
Summary: This is a patch to fix `ResourceWarning: unclosed file` by closing file handles promptly, mostly by switching bare `open()` calls to `with` context managers.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15746
Differential Revision: D13587286
Pulled By: soumith
fbshipit-source-id: 08ac34c5b51d9334867f65a2927bff11511553f3
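
For context (this sketch is not part of the commit), the idiom the patch applies throughout: a bare open() leaves the file to be closed whenever the garbage collector finalizes the object, at which point CPython 3 emits "ResourceWarning: unclosed file"; a with statement closes it deterministically. The path below is hypothetical.

    import warnings
    warnings.simplefilter("always", ResourceWarning)  # surface the warning (Python 3)

    def leaky(path):
        # The file object is closed only when the garbage collector
        # finalizes it; CPython then warns "ResourceWarning: unclosed file".
        return open(path).read()

    def fixed(path):
        # The with statement closes the file on exit, even if read() raises.
        with open(path) as f:
            return f.read()

Running under "python3 -W always::ResourceWarning" shows the warning for leaky() but not for fixed().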
-rwxr-xr-x  .jenkins/pytorch/win-build.sh                         4
-rw-r--r--  aten/src/ATen/gen.py                                  6
-rw-r--r--  caffe2/contrib/cuda-convnet2/make-data/make-data.py   6
-rw-r--r--  caffe2/contrib/cuda-convnet2/python_util/util.py     38
-rw-r--r--  caffe2/perfkernels/hp_emblookup_codegen.py            9
-rw-r--r--  caffe2/python/caffe_translator.py                    10
-rw-r--r--  caffe2/python/caffe_translator_test.py               13
-rw-r--r--  caffe2/python/utils.py                                3
-rw-r--r--  test/run_test.py                                      8
9 files changed, 42 insertions(+), 55 deletions(-)
diff --git a/.jenkins/pytorch/win-build.sh b/.jenkins/pytorch/win-build.sh
index b225eb99f6..9ca9fde9f8 100755
--- a/.jenkins/pytorch/win-build.sh
+++ b/.jenkins/pytorch/win-build.sh
@@ -29,8 +29,8 @@ IMAGE_COMMIT_TAG = os.getenv('IMAGE_COMMIT_TAG')
session = boto3.session.Session()
s3 = session.resource('s3')
-data = open(sys.argv[1], 'rb')
-s3.Bucket('ossci-windows-build').put_object(Key='pytorch/'+IMAGE_COMMIT_TAG+'.7z', Body=data)
+with open(sys.argv[1], 'rb') as data:
+ s3.Bucket('ossci-windows-build').put_object(Key='pytorch/'+IMAGE_COMMIT_TAG+'.7z', Body=data)
object_acl = s3.ObjectAcl('ossci-windows-build','pytorch/'+IMAGE_COMMIT_TAG+'.7z')
response = object_acl.put(ACL='public-read')
diff --git a/aten/src/ATen/gen.py b/aten/src/ATen/gen.py
index 0a627ea43a..3615700710 100644
--- a/aten/src/ATen/gen.py
+++ b/aten/src/ATen/gen.py
@@ -428,8 +428,10 @@ def cmpfiles_with_eol_normalization(a, b, names):
results = ([], [], []) # match, mismatch, error
for x in names:
try:
- ax = open(os.path.join(a, x), 'r').read().replace('\r\n', '\n').replace('\r', '\n')
- bx = open(os.path.join(b, x), 'r').read().replace('\r\n', '\n').replace('\r', '\n')
+ with open(os.path.join(a, x)) as f:
+ ax = f.read().replace('\r\n', '\n').replace('\r', '\n')
+ with open(os.path.join(b, x)) as f:
+ bx = f.read().replace('\r\n', '\n').replace('\r', '\n')
if ax == bx:
results[0].append(x)
else:
diff --git a/caffe2/contrib/cuda-convnet2/make-data/make-data.py b/caffe2/contrib/cuda-convnet2/make-data/make-data.py
index 1861ceb4cf..69d8f57fc3 100644
--- a/caffe2/contrib/cuda-convnet2/make-data/make-data.py
+++ b/caffe2/contrib/cuda-convnet2/make-data/make-data.py
@@ -52,10 +52,8 @@ def pickle(filename, data):
cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
def unpickle(filename):
- fo = open(filename, 'r')
- contents = cPickle.load(fo)
- fo.close()
- return contents
+ with open(filename) as fo:
+ return cPickle.load(fo)
def partition_list(l, partition_size):
divup = lambda a,b: (a + b - 1) / b
diff --git a/caffe2/contrib/cuda-convnet2/python_util/util.py b/caffe2/contrib/cuda-convnet2/python_util/util.py
index c0a8a4a078..e88c044579 100644
--- a/caffe2/contrib/cuda-convnet2/python_util/util.py
+++ b/caffe2/contrib/cuda-convnet2/python_util/util.py
@@ -28,34 +28,27 @@ def pickle(filename, data):
if type(filename) == str:
fo = open(filename, "w")
- cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
- fo.close()
+ with fo:
+ cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
def unpickle(filename):
if not os.path.exists(filename):
raise UnpickleError("Path '%s' does not exist." % filename)
- fo = open(filename, 'r')
- z = StringIO()
- file_size = os.fstat(fo.fileno()).st_size
- # Read 1GB at a time to avoid overflow
- while fo.tell() < file_size:
- z.write(fo.read(1 << 30))
- fo.close()
- dict = cPickle.loads(z.getvalue())
- z.close()
-
- return dict
+ with open(filename) as fo, StringIO() as z:
+ file_size = os.fstat(fo.fileno()).st_size
+ # Read 1GB at a time to avoid overflow
+ while fo.tell() < file_size:
+ z.write(fo.read(1 << 30))
+ return cPickle.loads(z.getvalue())
def is_intel_machine():
VENDOR_ID_REGEX = re.compile(r'^vendor_id\s+: (\S+)')
- f = open('/proc/cpuinfo')
- for line in f:
- m = VENDOR_ID_REGEX.match(line)
- if m:
- f.close()
- return m.group(1) == 'GenuineIntel'
- f.close()
+ with open('/proc/cpuinfo') as f:
+ for line in f:
+ m = VENDOR_ID_REGEX.match(line)
+ if m:
+ return m.group(1) == 'GenuineIntel'
return False
# Returns the CPUs associated with a given GPU
@@ -69,9 +62,8 @@ def get_cpus_for_gpu(gpu):
if line.startswith('Bus Location'):
bus_id = line.split(':', 1)[1].strip()
bus_id = bus_id[:7] + ':' + bus_id[8:]
- ff = open('/sys/module/nvidia/drivers/pci:nvidia/%s/local_cpulist' % bus_id)
- cpus_str = ff.readline()
- ff.close()
+ with open('/sys/module/nvidia/drivers/pci:nvidia/%s/local_cpulist' % bus_id) as ff:
+ cpus_str = ff.readline()
cpus = [cpu for s in cpus_str.split(',') for cpu in range(int(s.split('-')[0]),int(s.split('-')[1])+1)]
return cpus
return [-1]
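
An aside on the util.py change above (not part of the commit): one with statement can manage several resources at once, closing them in reverse order on exit, and the 1 GiB chunking keeps each read() call bounded. A minimal Python 3 re-creation with hypothetical names; io.BytesIO supports the context-manager protocol, unlike the old Python 2 StringIO.StringIO class.

    import io
    import os

    CHUNK = 1 << 30  # read 1 GiB at a time, as in the patch

    def read_whole_file(path):
        # Both fo and buf are closed when the block exits.
        with open(path, "rb") as fo, io.BytesIO() as buf:
            size = os.fstat(fo.fileno()).st_size
            while fo.tell() < size:
                buf.write(fo.read(CHUNK))
            return buf.getvalue()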
diff --git a/caffe2/perfkernels/hp_emblookup_codegen.py b/caffe2/perfkernels/hp_emblookup_codegen.py
index c1cbd4f37e..20f759c710 100644
--- a/caffe2/perfkernels/hp_emblookup_codegen.py
+++ b/caffe2/perfkernels/hp_emblookup_codegen.py
@@ -311,7 +311,6 @@ elif opts.fused:
filename = "embedding_lookup_fused_8bit_rowwise_avx2.cc"
else:
filename = "embedding_lookup_avx2.cc"
-fout = open(filename, "w")
options = [
["int32_t", "int32_t", "float", "float", "float", "float"],
@@ -422,10 +421,10 @@ for o in options:
code.append("} // namespace caffe2")
-for c in code:
- # print(c, file = fout)
- fout.write(c + "\n")
-fout.close()
+with open(filename, "w") as fout:
+ for c in code:
+ # print(c, file = fout)
+ fout.write(c + "\n")
print("Created " + filename)
diff --git a/caffe2/python/caffe_translator.py b/caffe2/python/caffe_translator.py
index 0592ca053c..63392c73e3 100644
--- a/caffe2/python/caffe_translator.py
+++ b/caffe2/python/caffe_translator.py
@@ -911,12 +911,10 @@ if __name__ == '__main__':
output_init_net = args.init_net
output_predict_net = args.predict_net
- text_format.Merge(
- open(input_proto, 'r').read(), caffenet
- )
- caffenet_pretrained.ParseFromString(
- open(input_caffemodel, 'rb').read()
- )
+ with open(input_proto) as f:
+ text_format.Merge(f.read(), caffenet)
+ with open(input_caffemodel, 'rb') as f:
+ caffenet_pretrained.ParseFromString(f.read())
net, pretrained_params = TranslateModel(
caffenet, caffenet_pretrained, is_test=True,
remove_legacy_pad=args.remove_legacy_pad,
diff --git a/caffe2/python/caffe_translator_test.py b/caffe2/python/caffe_translator_test.py
index 70b3956bcf..ff24afe975 100644
--- a/caffe2/python/caffe_translator_test.py
+++ b/caffe2/python/caffe_translator_test.py
@@ -31,14 +31,11 @@ def setUpModule():
# We will do all the computation stuff in the global space.
caffenet = caffe_pb2.NetParameter()
caffenet_pretrained = caffe_pb2.NetParameter()
- text_format.Merge(
- open('data/testdata/caffe_translator/deploy.prototxt').read(), caffenet
- )
- caffenet_pretrained.ParseFromString(
- open(
- 'data/testdata/caffe_translator/bvlc_reference_caffenet.caffemodel')
- .read()
- )
+ with open('data/testdata/caffe_translator/deploy.prototxt') as f:
+ text_format.Merge(f.read(), caffenet)
+ with open('data/testdata/caffe_translator/'
+ 'bvlc_reference_caffenet.caffemodel') as f:
+ caffenet_pretrained.ParseFromString(f.read())
for remove_legacy_pad in [True, False]:
net, pretrained_params = caffe_translator.TranslateModel(
caffenet, caffenet_pretrained, is_test=True,
diff --git a/caffe2/python/utils.py b/caffe2/python/utils.py
index 0d1f9f823f..d5c6355c1e 100644
--- a/caffe2/python/utils.py
+++ b/caffe2/python/utils.py
@@ -231,7 +231,8 @@ def GetContentFromProtoString(s, function_map):
def ConvertProtoToBinary(proto_class, filename, out_filename):
"""Convert a text file of the given protobuf class to binary."""
- proto = TryReadProtoWithClass(proto_class, open(filename).read())
+ with open(filename) as f:
+ proto = TryReadProtoWithClass(proto_class, f.read())
with open(out_filename, 'w') as fid:
fid.write(proto.SerializeToString())
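
One caveat worth flagging around this hunk (pre-existing, not introduced by the patch): SerializeToString() returns bytes, so the portable idiom opens the output file in binary mode; text mode only happens to work on POSIX Python 2. A hedged sketch:

    # Write serialized protobuf bytes in binary mode ('wb'), so the output
    # is byte-identical on every platform and also works on Python 3.
    with open(out_filename, 'wb') as fid:
        fid.write(proto.SerializeToString())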
diff --git a/test/run_test.py b/test/run_test.py
index ff440600ce..cd11273e73 100644
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -233,10 +233,10 @@ def test_distributed(executable, test_module, test_directory, options):
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
- devnull = open(os.devnull, 'w')
- noprefix_opt = '--noprefix' if subprocess.call(
- 'mpiexec -n 1 --noprefix bash -c ""', shell=True,
- stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
+ with open(os.devnull, 'w') as devnull:
+ noprefix_opt = '--noprefix' if subprocess.call(
+ 'mpiexec -n 1 --noprefix bash -c ""', shell=True,
+ stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt] + executable
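
As a closing aside (not part of the commit): on Python 3.3+ the temporary devnull handle can be avoided entirely, since subprocess.DEVNULL needs no opening or cleanup. A minimal sketch of the same probe:

    import subprocess

    # Probe whether mpiexec accepts --noprefix, discarding all output.
    rc = subprocess.call(
        'mpiexec -n 1 --noprefix bash -c ""', shell=True,
        stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    noprefix_opt = '--noprefix' if rc == 0 else ''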