Diffstat (limited to 'torch')
-rw-r--r--  torch/backends/cudnn/__init__.py                 |  2 +-
-rw-r--r--  torch/cuda/__init__.py                           |  2 +-
-rw-r--r--  torch/distributions/transformed_distribution.py  |  2 +-
-rw-r--r--  torch/nn/functional.py                           |  2 +-
-rw-r--r--  torch/utils/bottleneck/__main__.py               |  2 +-
-rw-r--r--  torch/utils/collect_env.py                       | 10 +++++-----
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/torch/backends/cudnn/__init__.py b/torch/backends/cudnn/__init__.py
index edd313c80e..fdd1993cf5 100644
--- a/torch/backends/cudnn/__init__.py
+++ b/torch/backends/cudnn/__init__.py
@@ -359,7 +359,7 @@ class RNNDescriptor(object):
 
 
 def check_error(status):
-    if status is not 0:
+    if status != 0:
         raise CuDNNError(status)
 
 
diff --git a/torch/cuda/__init__.py b/torch/cuda/__init__.py
index 4f4519f459..ef55f017fe 100644
--- a/torch/cuda/__init__.py
+++ b/torch/cuda/__init__.py
@@ -221,7 +221,7 @@ class device(object):
         self.prev_idx = -1
 
     def __enter__(self):
-        if self.idx is -1:
+        if self.idx == -1:
             return
         self.prev_idx = torch._C._cuda_getDevice()
         if self.prev_idx != self.idx:
diff --git a/torch/distributions/transformed_distribution.py b/torch/distributions/transformed_distribution.py
index a7c49b4f7b..46c4fbccb4 100644
--- a/torch/distributions/transformed_distribution.py
+++ b/torch/distributions/transformed_distribution.py
@@ -125,7 +125,7 @@ class TransformedDistribution(Distribution):
         sign = 1
         for transform in self.transforms:
             sign = sign * transform.sign
-        if sign is 1:
+        if isinstance(sign, int) and sign == 1:
             return value
         return sign * (value - 0.5) + 0.5
 
diff --git a/torch/nn/functional.py b/torch/nn/functional.py
index 6ca5b9791e..04e6382750 100644
--- a/torch/nn/functional.py
+++ b/torch/nn/functional.py
@@ -1824,7 +1824,7 @@ def nll_loss(input, target, weight=None, size_average=None, ignore_index=-100,
         input = input.contiguous().view(n, c, 1, -1)
         target = target.contiguous().view(n, 1, -1)
         reduction_enum = _Reduction.get_enum(reduction)
-        if reduction is not 'none':
+        if reduction != 'none':
             ret = torch._C._nn.nll_loss2d(
                 input, target, weight, reduction_enum, ignore_index)
         else:
diff --git a/torch/utils/bottleneck/__main__.py b/torch/utils/bottleneck/__main__.py
index b4661de751..ae5de6b9da 100644
--- a/torch/utils/bottleneck/__main__.py
+++ b/torch/utils/bottleneck/__main__.py
@@ -130,7 +130,7 @@ def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
         print(warn.format(autograd_prof_sortby))
         sortby = 'cpu_time'
 
-    if mode is 'CUDA':
+    if mode == 'CUDA':
         cuda_warning = ('\n\tBecause the autograd profiler uses the CUDA event API,\n'
                         '\tthe CUDA time column reports approximately max(cuda_time, cpu_time).\n'
                         '\tPlease ignore this output if your code does not use CUDA.\n')
diff --git a/torch/utils/collect_env.py b/torch/utils/collect_env.py
index b406629818..eed856d848 100644
--- a/torch/utils/collect_env.py
+++ b/torch/utils/collect_env.py
@@ -52,7 +52,7 @@ def run(command):
 def run_and_read_all(run_lambda, command):
     """Runs command using run_lambda; reads and returns entire output if rc is 0"""
     rc, out, _ = run_lambda(command)
-    if rc is not 0:
+    if rc != 0:
         return None
     return out
 
@@ -60,7 +60,7 @@ def run_and_read_all(run_lambda, command):
 def run_and_parse_first_match(run_lambda, command, regex):
     """Runs command using run_lambda, returns the first regex match if it exists"""
     rc, out, _ = run_lambda(command)
-    if rc is not 0:
+    if rc != 0:
         return None
     match = re.search(regex, out)
     if match is None:
@@ -98,7 +98,7 @@ def get_gpu_info(run_lambda):
     smi = get_nvidia_smi()
     uuid_regex = re.compile(r' \(UUID: .+?\)')
     rc, out, _ = run_lambda(smi + ' -L')
-    if rc is not 0:
+    if rc != 0:
         return None
     # Anonymize GPUs by removing their UUID
     return re.sub(uuid_regex, '', out)
@@ -165,7 +165,7 @@ def check_release_file(run_lambda):
 
 def get_os(run_lambda):
     platform = get_platform()
-    if platform is 'win32' or platform is 'cygwin':
+    if platform == 'win32' or platform == 'cygwin':
         return get_windows_version(run_lambda)
 
     if platform == 'darwin':
@@ -208,7 +208,7 @@ def get_pip_packages(run_lambda):
     out3 = run_with_pip('pip3')
 
     num_pips = len([x for x in [out2, out3] if x is not None])
-    if num_pips is 0:
+    if num_pips == 0:
         return 'pip', out2
 
     if num_pips == 1:
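Why every hunk makes the same change: `is` and `is not` test object identity, while `==` and `!=` test equality. Comparisons like `status is not 0` or `mode is 'CUDA'` only appear to work because CPython happens to cache small integers and intern some string literals; for uncached values the identity test silently fails. (In the transformed_distribution.py hunk, the added `isinstance(sign, int)` guard presumably also keeps the branch from firing when `sign` is a tensor, where `sign == 1` is not a plain bool.) Below is a minimal standalone sketch of the pitfall, not part of the patch; the cached ranges are CPython implementation details:

# Identity vs. equality: `is` asks "same object?", `==` asks "equal value?".
a = 256
b = 256
print(a is b)               # True -- CPython caches ints in [-5, 256]

c = int("1000")             # built at runtime, outside the small-int cache
d = int("1000")
print(c == d)               # True  -- equal values
print(c is d)               # False -- two distinct objects

s = "".join(["CU", "DA"])   # runtime-built string, typically not interned
print(s == "CUDA")          # True
print(s is "CUDA")          # False; Python >= 3.8 also warns on `is` with a literal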