summaryrefslogtreecommitdiff
path: root/test/test_distributions.py
diff options
context:
space:
mode:
author: Tongzhou Wang <SsnL@users.noreply.github.com> 2018-05-31 15:09:54 -0400
committer: GitHub <noreply@github.com> 2018-05-31 15:09:54 -0400
commit: 85ee94b7be86867a8afde51cae4ce0baff42d93b (patch)
tree: 022b7802eab8a68938a9aeaefe0d818065bf304d /test/test_distributions.py
parent: bafec1637ee4562875c2c81a1e85c7f1c9e66050 (diff)
download: pytorch-85ee94b7be86867a8afde51cae4ce0baff42d93b.tar.gz
          pytorch-85ee94b7be86867a8afde51cae4ce0baff42d93b.tar.bz2
          pytorch-85ee94b7be86867a8afde51cae4ce0baff42d93b.zip
Add memory leak check in CUDA tests (#7270)
* Add memory leak check in CUDA tests
* Tracking multi-GPU too
* fix run_test.py not running __name__ == '__main__' content; add test for make_cuda_memory_checked_test
* add a comment
* skip if cuda
* 1. Change the wrapper to a method in common.py:TestCase
  2. Refactor common constants/method that initialize CUDA context into common_cuda.py
  3. Update some test files to use TEST_CUDA and TEST_MULTIGPU
* Fix MaxUnpool3d forward memory leak
* Fix MultiLabelMarginCriterion forward memory leak
* Fix MultiMarginLoss backward memory leak
* default doCUDAMemoryCheck to False
* make the wrapper skip-able
* use TEST_MULTIGPU
* add align_corners=True/False tests for Upsample; fix TEST_CUDNN
* finalize interface
* VolumetricMaxUnpooling_updateOutput
* fix test_nccl
* rename THC caching allocator methods to be clearer
* make the wrapped function a method
* address comments; revert changes to aten/src/THC/THCCachingAllocator.cpp
* fix renamed var
Diffstat (limited to 'test/test_distributions.py')
-rw-r--r--test/test_distributions.py5
1 file changed, 3 insertions, 2 deletions
diff --git a/test/test_distributions.py b/test/test_distributions.py
index e03b1f6fb3..fa43268d65 100644
--- a/test/test_distributions.py
+++ b/test/test_distributions.py
@@ -31,6 +31,7 @@ from random import shuffle
import torch
from common import TestCase, run_tests, set_rng_seed
+from common_cuda import TEST_CUDA
from torch.autograd import Variable, grad, gradcheck
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, Dirichlet, Distribution,
@@ -63,8 +64,6 @@ try:
except ImportError:
TEST_NUMPY = False
-TEST_CUDA = torch.cuda.is_available()
-
def pairwise(Dist, *params):
"""
@@ -578,6 +577,8 @@ def unwrap(value):
class TestDistributions(TestCase):
+ _do_cuda_memory_leak_check = True
+
def _gradcheck_log_prob(self, dist_ctor, ctor_params):
# performs gradient checks on log_prob
distribution = dist_ctor(*ctor_params)