author    anderspapitto <anderspapitto@gmail.com>  2018-06-20 14:45:26 -0700
committer Soumith Chintala <soumith@gmail.com>     2018-06-20 17:45:26 -0400
commit    48e90e3339b8b027e2525da6d1977204ecfbff73
tree      d933c4e87a29b41796e4cfb19f349f2088304ed4
parent    0acddd6cee32bc7c3715bc8b93d0a33ef19064b1
Build system changes (#8627)
* All changes needed to get rid of process_github.sh
* allow thnn_h_path
 aten/src/ATen/cudnn/Handles.cpp      |  9
 setup.py                             |  4
 test/common.py                       |  3
 test/test_distributed.py             |  3
 test/test_torch.py                   | 11
 test/test_utils.py                   |  1
 tools/nnwrap/__init__.py             |  6
 tools/nnwrap/generate_wrappers.py    | 19
 tools/setup_helpers/generate_code.py |  1
 torch/CMakeLists.txt                 |  4
 torch/__init__.py                    |  6
 torch/_thnn/utils.py                 | 12
 torch/_utils_internal.py             | 32
 13 files changed, 85 insertions(+), 26 deletions(-)
diff --git a/aten/src/ATen/cudnn/Handles.cpp b/aten/src/ATen/cudnn/Handles.cpp
index 04a81d5a60..36f0beec55 100644
--- a/aten/src/ATen/cudnn/Handles.cpp
+++ b/aten/src/ATen/cudnn/Handles.cpp
@@ -19,7 +19,16 @@ struct Handle {
}
~Handle() {
if (handle) {
+// This is a workaround for a destruction-ordering problem: at
+// process exit (atexit), the CUDA context (or something it depends
+// on) can already be destroyed by the time this destructor runs.
+// This happens in the fbcode setting. @colesbury and I decided not
+// to destroy the handle as a workaround.
+// - @soumith
+#ifdef NO_CUDNN_DESTROY_HANDLE
+#else
cudnnDestroy(handle);
+#endif
}
}
};
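The NO_CUDNN_DESTROY_HANDLE escape hatch only takes effect if the build defines it. A minimal sketch of how a downstream build might opt in, assuming a setuptools-based extension (the macro name comes from the diff above; the extension itself is hypothetical):

```python
# Hypothetical opt-in: define NO_CUDNN_DESTROY_HANDLE so the Handle
# destructor above skips cudnnDestroy() in environments with fragile
# teardown ordering (e.g. fbcode, per the comment in Handles.cpp).
from setuptools import Extension, setup

ext = Extension(
    name='example_cudnn_ext',           # hypothetical extension name
    sources=['example_cudnn_ext.cpp'],  # hypothetical source file
    define_macros=[('NO_CUDNN_DESTROY_HANDLE', None)],
)

setup(name='example-cudnn-ext', ext_modules=[ext])
```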
diff --git a/setup.py b/setup.py
index 8387f4d1a6..a6c8e93945 100644
--- a/setup.py
+++ b/setup.py
@@ -379,8 +379,8 @@ class build_deps(PytorchCommand):
# Use copies instead of symbolic files.
# Windows has very poor support for them.
- sym_files = ['tools/shared/cwrap_common.py']
- orig_files = ['aten/src/ATen/common_with_cwrap.py']
+ sym_files = ['tools/shared/cwrap_common.py', 'tools/shared/_utils_internal.py']
+ orig_files = ['aten/src/ATen/common_with_cwrap.py', 'torch/_utils_internal.py']
for sym_file, orig_file in zip(sym_files, orig_files):
if os.path.exists(sym_file):
os.remove(sym_file)
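For context, these lists feed a copy loop: the build materializes real file copies because Windows handles symlinks poorly. The pattern in isolation (a sketch; the copy call sits below the visible hunk and is assumed to be a plain file copy):

```python
import os
import shutil

# Replace any stale copy (or symlink) of each shared helper module
# with a fresh copy of its source of truth.
sym_files = ['tools/shared/cwrap_common.py', 'tools/shared/_utils_internal.py']
orig_files = ['aten/src/ATen/common_with_cwrap.py', 'torch/_utils_internal.py']
for sym_file, orig_file in zip(sym_files, orig_files):
    if os.path.exists(sym_file):
        os.remove(sym_file)
    shutil.copyfile(orig_file, sym_file)
```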
diff --git a/test/common.py b/test/common.py
index 859ca5aa98..c87704fc25 100644
--- a/test/common.py
+++ b/test/common.py
@@ -27,6 +27,7 @@ import errno
import torch
import torch.cuda
+from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
@@ -505,7 +506,7 @@ def download_file(url, binary=True):
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
- data_dir = os.path.join(os.path.dirname(__file__), 'data')
+ data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
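In the open-source tree get_writable_path is the identity function (see the new torch/_utils_internal.py at the bottom of this diff), but routing the data directory through it lets a replacement module redirect test downloads somewhere actually writable. A hedged sketch of what an override might look like; the tempdir fallback is an assumption, not the FB implementation:

```python
import os
import tempfile

def get_writable_path(path):
    # Hypothetical override: keep the requested directory if we can
    # write to it, otherwise fall back to a fresh temporary directory.
    if os.path.exists(path) and os.access(path, os.W_OK):
        return path
    return tempfile.mkdtemp(prefix='torch_test_data_')
```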
diff --git a/test/test_distributed.py b/test/test_distributed.py
index 543202a807..5f50165f0b 100644
--- a/test/test_distributed.py
+++ b/test/test_distributed.py
@@ -17,11 +17,12 @@ import torch.nn.functional as F
from torch.autograd import Variable
from common import TestCase
+from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
+
BACKEND = os.environ['BACKEND']
TEMP_DIR = os.environ['TEMP_DIR']
INIT_METHOD = os.getenv('INIT_METHOD', 'env://')
MASTER_PORT = '29500'
-MASTER_ADDR = '127.0.0.1'
DEFAULT_TIMEOUT = 15
CUSTOMIZED_TIMEOUT = {'test_DistributedDataParallel': 25}
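Importing the address under an alias keeps the rest of the test unchanged while letting a replacement _utils_internal.py point the distributed tests at a different rendezvous host. An internal override could be a single line (the hostname here is made up):

```python
# Hypothetical value in a replacement _utils_internal.py; the
# open-source default below remains '127.0.0.1'.
TEST_MASTER_ADDR = 'distributed-test-master.example.com'
```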
diff --git a/test/test_torch.py b/test/test_torch.py
index 4890907403..e3020f5379 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -13,6 +13,7 @@ import unittest
import warnings
import pickle
import gzip
+from torch._utils_internal import get_file_path, get_file_path_2
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch._utils import _rebuild_tensor
from itertools import product, combinations
@@ -6656,7 +6657,10 @@ class TestTorch(TestCase):
return module
with filecontext_lambda() as checkpoint:
- fname = os.path.join(os.path.dirname(__file__), 'data/network1.py')
+ try:
+ fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network1.py')
+ except IOError:
+ fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network1.pyc')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
@@ -6669,7 +6673,10 @@ class TestTorch(TestCase):
self.assertEquals(len(w), 0)
# Replace the module with different source
- fname = os.path.join(os.path.dirname(__file__), 'data/network2.py')
+ try:
+ fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network2.py')
+ except IOError:
+ fname = get_file_path_2(os.path.dirname(__file__), 'data', 'network2.pyc')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
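The try/except IOError only makes sense with a get_file_path_2 that validates its result; the open-source stub at the bottom of this diff is a plain os.path.join and never raises. A sketch of a checking variant that would make the .py to .pyc fallback meaningful (an assumption about the replaced module's behavior, not its actual code):

```python
import os

def get_file_path_2(*path_components):
    # Hypothetical checking variant: join like os.path.join, but raise
    # IOError if the file is missing, so callers can fall back from
    # 'network1.py' to 'network1.pyc' in source-less deployments.
    path = os.path.join(*path_components)
    if not os.path.exists(path):
        raise IOError('no such file: %s' % path)
    return path
```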
diff --git a/test/test_utils.py b/test/test_utils.py
index cde57e6f92..af93652562 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -530,6 +530,7 @@ class TestLuaReader(TestCase):
return input, target.sub(1)
+@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
def _run(self, command):
"""Returns (return-code, stdout, stderr)"""
diff --git a/tools/nnwrap/__init__.py b/tools/nnwrap/__init__.py
index 2a574f7299..d6457a58c4 100644
--- a/tools/nnwrap/__init__.py
+++ b/tools/nnwrap/__init__.py
@@ -1,5 +1 @@
-from .generate_wrappers import generate_wrappers, wrap_function
-try:
- from .generate_wrappers import import_module
-except ImportError:
- pass
+from .generate_wrappers import generate_wrappers, wrap_function, import_module
diff --git a/tools/nnwrap/generate_wrappers.py b/tools/nnwrap/generate_wrappers.py
index 1edbca928c..db4caf6bef 100644
--- a/tools/nnwrap/generate_wrappers.py
+++ b/tools/nnwrap/generate_wrappers.py
@@ -3,17 +3,16 @@ import sys
from string import Template, ascii_lowercase
from ..cwrap import cwrap
from ..cwrap.plugins import NNExtension, NullableArguments, AutoGPU
+from ..shared import import_module
-BASE_PATH = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
-WRAPPER_PATH = os.path.join(BASE_PATH, 'torch', 'csrc', 'nn')
-THNN_UTILS_PATH = os.path.join(BASE_PATH, 'torch', '_thnn', 'utils.py')
+from ..shared._utils_internal import get_file_path
+THNN_H_PATH = get_file_path('torch', 'lib', 'THNN.h')
+THCUNN_H_PATH = get_file_path('torch', 'lib', 'THCUNN.h')
-try:
- from torch._thnn import utils as thnn_utils
-except ImportError:
- from ..shared import import_module
- thnn_utils = import_module('torch._thnn.utils', THNN_UTILS_PATH)
+THNN_UTILS_PATH = get_file_path('torch', '_thnn', 'utils.py')
+
+thnn_utils = import_module('torch._thnn.utils', THNN_UTILS_PATH)
FUNCTION_TEMPLATE = Template("""\
[[
@@ -105,7 +104,7 @@ def generate_wrappers(nn_root=None, install_dir=None, template_path=None):
def wrap_nn(thnn_h_path, install_dir, template_path):
wrapper = '#include <TH/TH.h>\n\n\n'
- nn_functions = thnn_utils.parse_header(thnn_h_path or thnn_utils.THNN_H_PATH)
+ nn_functions = thnn_utils.parse_header(thnn_h_path or THNN_H_PATH)
for fn in nn_functions:
for t in ['Float', 'Double']:
wrapper += wrap_function(fn.name, t, fn.arguments)
@@ -124,7 +123,7 @@ def wrap_nn(thnn_h_path, install_dir, template_path):
def wrap_cunn(thcunn_h_path, install_dir, template_path):
wrapper = '#include <TH/TH.h>\n'
wrapper += '#include <THC/THC.h>\n\n\n'
- cunn_functions = thnn_utils.parse_header(thcunn_h_path or thnn_utils.THCUNN_H_PATH)
+ cunn_functions = thnn_utils.parse_header(thcunn_h_path or THCUNN_H_PATH)
for fn in cunn_functions:
for t in ['CudaHalf', 'Cuda', 'CudaDouble']:
wrapper += wrap_function(fn.name, t, fn.arguments)
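import_module(name, path) loads a module from an explicit file path, which is what allows torch._thnn.utils to be used before torch itself is importable. A self-contained sketch of such a helper (the actual tools.shared version may differ):

```python
import sys

def import_module(name, path):
    # Load the module at 'path' and register it under 'name' without
    # requiring its parent package to be importable.
    if sys.version_info[0] >= 3:
        import importlib.util
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    else:
        import imp
        module = imp.load_source(name, path)
    sys.modules[name] = module
    return module
```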
diff --git a/tools/setup_helpers/generate_code.py b/tools/setup_helpers/generate_code.py
index c9779fb869..d44e286d56 100644
--- a/tools/setup_helpers/generate_code.py
+++ b/tools/setup_helpers/generate_code.py
@@ -77,6 +77,7 @@ def generate_code(ninja_global=None,
sys.path.insert(0, root)
from tools.autograd.gen_autograd import gen_autograd
from tools.jit.gen_jit_dispatch import gen_jit_dispatch
+
from tools.nnwrap import generate_wrappers as generate_nn_wrappers
# Build THNN/THCUNN.cwrap and then THNN/THCUNN.cpp. These are primarily
diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt
index 0c5bcd3a2c..ff26a2967f 100644
--- a/torch/CMakeLists.txt
+++ b/torch/CMakeLists.txt
@@ -137,6 +137,10 @@ configure_file("${CAFFE2_PATH}/aten/src/ATen/common_with_cwrap.py"
"${TOOLS_PATH}/shared/cwrap_common.py"
COPYONLY)
+configure_file("${CAFFE2_PATH}/torch/_utils_internal.py"
+ "${TOOLS_PATH}/shared/_utils_internal.py"
+ COPYONLY)
+
add_custom_command(
OUTPUT
"${TORCH_SRC_DIR}/csrc/nn/THNN.cpp"
diff --git a/torch/__init__.py b/torch/__init__.py
index 83b88c246b..88706460d8 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -8,9 +8,11 @@ It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
+import os
import sys
import platform
from ._utils import _import_dotted_name
+from ._utils_internal import get_file_path, prepare_multiprocessing_environment
from .version import __version__
from ._six import string_classes as _string_classes
@@ -233,8 +235,8 @@ _tensor_classes = set()
def manager_path():
if platform.system() == 'Windows':
return b""
- import os
- path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')
+ path = get_file_path('torch', 'lib', 'torch_shm_manager')
+ prepare_multiprocessing_environment(get_file_path('torch'))
if not os.path.exists(path):
raise RuntimeError("Unable to find torch_shm_manager at " + path)
return path.encode('utf-8')
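prepare_multiprocessing_environment is a no-op in open source (see below); passing it the torch install path here lets a replacement module stage whatever the shared-memory manager needs before it launches. A purely hypothetical override, just to show the shape of the hook:

```python
import os

def prepare_multiprocessing_environment(path):
    # Hypothetical override: make sure the torch lib directory is on
    # the dynamic-loader path before torch_shm_manager is spawned.
    lib_dir = os.path.join(path, 'lib')
    old = os.environ.get('LD_LIBRARY_PATH', '')
    os.environ['LD_LIBRARY_PATH'] = lib_dir + (os.pathsep + old if old else '')
```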
diff --git a/torch/_thnn/utils.py b/torch/_thnn/utils.py
index 84c93a8180..b3f09e46a4 100644
--- a/torch/_thnn/utils.py
+++ b/torch/_thnn/utils.py
@@ -4,9 +4,15 @@ import importlib
# in fbcode, this fails in some cases, but we don't need it, therefore the try-catch
try:
- THNN_H_PATH = os.path.join(os.path.dirname(__file__), '..', 'lib', 'THNN.h')
- THCUNN_H_PATH = os.path.join(os.path.dirname(__file__), '..', 'lib', 'THCUNN.h')
-except Exception:
+ # When compiling a cffi extension, this import works. When
+ # compiling torch itself it does not, because the parent module
+ # cannot yet be imported. That is fine, because we do not need
+ # the paths in that case.
+ from .._utils_internal import get_file_path
+
+ THNN_H_PATH = get_file_path('torch', 'lib', 'THNN.h')
+ THCUNN_H_PATH = get_file_path('torch', 'lib', 'THCUNN.h')
+except Exception as e:
pass
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
new file mode 100644
index 0000000000..611dc7827c
--- /dev/null
+++ b/torch/_utils_internal.py
@@ -0,0 +1,32 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import os
+
+# This arbitrary-looking assortment of functionality is provided here
+# to give overridable behavior a central place. The motivating use is
+# the FB build environment, where this source file is replaced by an
+# equivalent.
+
+if os.path.basename(os.path.dirname(__file__)) == 'shared':
+ torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+else:
+ torch_parent = os.path.dirname(os.path.dirname(__file__))
+
+
+def get_file_path(*path_components):
+ return os.path.join(torch_parent, *path_components)
+
+
+def get_file_path_2(*path_components):
+ return os.path.join(*path_components)
+
+
+def get_writable_path(path):
+ return path
+
+
+def prepare_multiprocessing_environment(path):
+ pass
+
+
+TEST_MASTER_ADDR = '127.0.0.1'
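A quick illustration of how these stubs behave in an open-source checkout (the printed path is illustrative and depends on where the repo lives):

```python
from torch._utils_internal import get_file_path, get_writable_path

# get_file_path resolves relative to the directory that contains the
# 'torch' package, so components are spelled from the repo root.
print(get_file_path('torch', 'lib', 'THNN.h'))
# e.g. /home/user/pytorch/torch/lib/THNN.h

# The stubs are deliberately trivial; an internal build swaps in a
# replacement module to change all of these behaviors at once.
assert get_writable_path('/tmp/data') == '/tmp/data'
```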