author     James Reed <jamesreed@fb.com>                                       2019-01-26 17:39:34 -0800
committer  Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2019-01-26 17:42:08 -0800
commit     de6bb3f3e393a1f837b5b99833a5fdb98350dca4 (patch)
tree       4d1d11bcbe59658399a65ed1793dd26d7da42ed3 /test
parent     d1ed0176df9172fcef643172b756c8e32c43533e (diff)
Fix flake8 warnings/errors in test_jit.py (#16409)
Summary:
These were really annoying to see in the phabricator UI when trying to land PRs that touched test_jit.py, so this fixes them.
One remaining item is the T484 error. Locally, flake8 still chokes on that line even though I put the noqa comment there (and tried varying the whitespace around it, etc.). Not sure why it still persists...
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16409
Differential Revision: D13832658
Pulled By: jamesr66a
fbshipit-source-id: 46356ba6444ae5ee1a141c28489bdcc7c99e39c0
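
For context, the bulk of the cleanup in the diff below applies two mechanical patterns: list comprehensions passed to sum() are replaced with plain generator expressions, and loop counters that are never read are renamed to "_". A minimal standalone sketch of both patterns (hypothetical function names, not taken from test_jit.py):

    import math

    def scaled_sum(values):
        # Pass a generator expression to sum() instead of building a throwaway
        # list first; mirrors the sum([...]) -> sum(...) hunks in the diff.
        return sum(math.log(i + 2) * v for i, v in enumerate(values) if v is not None)

    def repeat_add(n):
        # Rename an unused loop counter to "_" so the linter stops flagging it;
        # mirrors the "for i in range(...)" -> "for _ in range(...)" hunks.
        total = 0
        for _ in range(n):
            total += 1
        return total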
Diffstat (limited to 'test')
-rw-r--r--  test/expect/TestScript.test_loop_unroll_unused_counter.expect | 4
-rw-r--r--  test/expect/TestScript.test_loop_unrolling_nested.expect | 2
-rw-r--r--  test/test_jit.py | 130
3 files changed, 69 insertions, 67 deletions
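
A third recurring change completes Python 2-style type comments with an explicit return annotation; a minimal sketch of the before/after shape (hypothetical function, shown only to illustrate the comment syntax):

    from typing import List

    def append_four(y):
        # type: (List[int]) -> None
        # A bare "# type: (List[int])" comment is not a full signature; adding
        # the "-> None" return annotation is what several hunks below do.
        y.append(4)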
diff --git a/test/expect/TestScript.test_loop_unroll_unused_counter.expect b/test/expect/TestScript.test_loop_unroll_unused_counter.expect
index d0096c5daf..102405be6c 100644
--- a/test/expect/TestScript.test_loop_unroll_unused_counter.expect
+++ b/test/expect/TestScript.test_loop_unroll_unused_counter.expect
@@ -9,7 +9,7 @@ graph(%x : Tensor) {
   %8 : int = aten::mul(%6, %7)
   %9 : int = aten::sub(%4, %8)
   %y.3 : int = prim::Loop(%6, %1, %y.1)
-    block0(%i.1 : int, %12 : int) {
+    block0(%11 : int, %12 : int) {
       %y.12 : int = aten::add(%12, %3)
       %y.5 : int = aten::add(%y.12, %3)
       %y.6 : int = aten::add(%y.5, %3)
@@ -21,7 +21,7 @@ graph(%x : Tensor) {
       -> (%1, %y.11)
     }
   %y : int = prim::Loop(%9, %1, %y.3)
-    block0(%i : int, %23 : int) {
+    block0(%22 : int, %23 : int) {
       %y.4 : int = aten::add(%23, %3)
       -> (%1, %y.4)
     }
diff --git a/test/expect/TestScript.test_loop_unrolling_nested.expect b/test/expect/TestScript.test_loop_unrolling_nested.expect
index 6204965e93..abbc0b559b 100644
--- a/test/expect/TestScript.test_loop_unrolling_nested.expect
+++ b/test/expect/TestScript.test_loop_unrolling_nested.expect
@@ -3,7 +3,7 @@ graph(%x : Tensor) {
   %y.1 : int = prim::Constant[value=0]()
   %3 : int = prim::Constant[value=10]()
   %y : int = prim::Loop(%3, %1, %y.1)
-    block0(%i : int, %6 : int) {
+    block0(%5 : int, %6 : int) {
       %7 : int = prim::Int(%x)
       %8 : int = prim::Constant[value=0]()
       %9 : int = prim::Constant[value=8]()
diff --git a/test/test_jit.py b/test/test_jit.py
index b4de6fb70c..bdd31f40f7 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -448,7 +448,7 @@ class JitTestCase(TestCase):
                 vs = vs[:-drop]
             # we don't want all the grad for all the outputs to be the same
             # so we multiply each by a constant
-            return sum([math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None])
+            return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
 
         if input_tensors is None:
             input_tensors = reference_tensors
@@ -1153,7 +1153,7 @@ class TestJit(JitTestCase):
             float(z)  # Warning 4.
             z.tolist()  # Warning 5.
            z.numpy()  # Warning 6.
-            for elem in torch.ones(4, 4):  # Warning 7.
+            for _ in torch.ones(4, 4):  # Warning 7.
                 pass
             return z + 4
@@ -1752,7 +1752,7 @@ class TestJit(JitTestCase):
         def foo(bar, baz):
             baz = bar + 3
             quick_brown_fox = torch.neg(baz)
-            for i in range(20):
+            for _ in range(20):
                 yeet = quick_brown_fox - 3.14
             return yeet
 
@@ -2919,7 +2919,7 @@ class TestScript(JitTestCase):
         def bar():
             a = torch.jit.annotate(List[int], [])
-            for i in range(10):
+            for _ in range(10):
                 a.append(4)
             return a
 
@@ -3428,45 +3428,45 @@ a")
         inp = consec((4, 8, 5))
         to_check = [
             # [[0, 2], [1, 3]]
-            ['[i, j]', dict(i=[0, 2], j=[1, 3])],
+            ['[i, j]', {'i': [0, 2], 'j': [1, 3]}],
             # [[0, 2], [1, 3], [1, 1]]
-            ['[i, j, k]', dict(i=[0, 2], j=[1, 3], k=[1, 1])],
+            ['[i, j, k]', {'i': [0, 2], 'j': [1, 3], 'k': [1, 1]}],
             # [[0, 2], 1, [1, 1]]
-            ['[i, j, k]', dict(i=[0, 2], j=1, k=[1, 1])],
+            ['[i, j, k]', {'i': [0, 2], 'j': 1, 'k': [1, 1]}],
             # [:, :, [0, 3, 4]]
-            ['[:, :, i]', dict(i=[0, 3, 4])],
+            ['[:, :, i]', {'i': [0, 3, 4]}],
             # [:, [2, 4, 5, 7], 2:4]
-            ['[:, i, 2:4]', dict(i=[0, 2, 3])],
+            ['[:, i, 2:4]', {'i': [0, 2, 3]}],
             # [[2, 3], :, :]
-            ['[i, :, :]', dict(i=[2, 3])],
+            ['[i, :, :]', {'i': [2, 3]}],
             # [:, [0, 2, 3], [1, 3, 4]]
-            ['[:, i, j]', dict(i=[0, 2, 3], j=[1, 3, 4])],
+            ['[:, i, j]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
             # [:, [0], [1, 2, 4]]
-            ['[:, i, j]', dict(i=[0], j=[1, 2, 4])],
+            ['[:, i, j]', {'i': [0], 'j': [1, 2, 4]}],
             # [:, [0, 1, 3], [4]]
-            ['[:, i, j]', dict(i=[0, 1, 3], j=[4])],
+            ['[:, i, j]', {'i': [0, 1, 3], 'j': [4]}],
             # [:, [[0, 1], [1, 0]], [[2, 3]]]
-            ['[:, i, j]', dict(i=[[0, 1], [1, 0]], j=[[2, 3]])],
+            ['[:, i, j]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
             # [:, [[0, 1], [2, 3]], [[0]]]
-            ['[:, i, j]', dict(i=[[0, 1], [2, 3]], j=[[0]])],
+            ['[:, i, j]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
             # [:, [[5, 6]], [[0, 3], [4, 4]]]
-            ['[:, i, j]', dict(i=[[5, 6]], j=[[0, 3], [4, 4]])],
+            ['[:, i, j]', {'i': [[5, 6]], 'j': [[0, 3], [4, 4]]}],
             # [[0, 2, 3], [1, 3, 4], :]
-            ['[i, j, :]', dict(i=[0, 2, 3], j=[1, 3, 4])],
+            ['[i, j, :]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
             # [0, [1, 2, 4], :]
-            ['[i, j, :]', dict(i=0, j=[1, 2, 4])],
+            ['[i, j, :]', {'i': 0, 'j': [1, 2, 4]}],
             # [[0, 1, 3], 4, :]
-            ['[i, j, :]', dict(i=[0, 1, 3], j=4)],
+            ['[i, j, :]', {'i': [0, 1, 3], 'j': 4}],
             # [[[0, 1], [1, 0]], [[2, 1], [3, 5]], :]
-            ['[i, j, :]', dict(i=[[0, 1], [1, 0]], j=[[2, 1], [3, 5]])],
+            ['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 1], [3, 5]]}],
             # [[[0, 1], [1, 0]], [[2, 3]], :]
-            ['[i, j, :]', dict(i=[[0, 1], [1, 0]], j=[[2, 3]])],
+            ['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
             # [[[0, 1], [2, 3]], [[0]], :]
-            ['[i, j, :]', dict(i=[[0, 1], [2, 3]], j=[[0]])],
+            ['[i, j, :]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
             # [[[2, 1]], [[0, 3], [4, 4]], :]
-            ['[i, j, :]', dict(i=[[2, 1]], j=[[0, 3], [4, 4]])],
+            ['[i, j, :]', {'i': [[2, 1]], 'j': [[0, 3], [4, 4]]}],
             # [[[2]], [[0, 3], [4, 1]], 0:2]
-            ['[i, j, 0:2]', dict(i=[[2]], j=[[0, 3], [4, 1]])],
+            ['[i, j, 0:2]', {'i': [[2]], 'j': [[0, 3], [4, 1]]}],
         ]
 
         for expr, argdict in to_check:
@@ -3900,7 +3900,7 @@ a")
     def test_mutable_list_function_inline(self):
         @torch.jit.script
         def bar(y):
-            # type: (List[int])
+            # type: (List[int]) -> None
            y.append(4)
 
         @torch.jit.script
@@ -5180,7 +5180,7 @@ a")
 
                 @torch.jit.script_method
                 def forward(self, x, hiddens):
-                    # type: (torch.Tensor, Tuple[torch.Tensor, torch.Tensor])
+                    # type: (torch.Tensor, Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]
                     return self.cell(x, hiddens)
 
         else:
@@ -5191,7 +5191,7 @@ a")
 
                 @torch.jit.script_method
                 def forward(self, x, hiddens):
-                    # type: (torch.Tensor, torch.Tensor)
+                    # type: (torch.Tensor, torch.Tensor) -> torch.Tensor
                     return self.cell(x, hiddens)
 
         cell = ScriptWrapper(cell)
@@ -6116,10 +6116,12 @@ a")
             # type: (BroadcastingListx[int]) -> List[int]
             return x
 
+        # TODO: the type comment in this seems to trip up flake8 for some reason
+        # even though we have a noqa comment. Figure out why
         with self.assertRaisesRegex(RuntimeError, "Unknown type constructor"):
             @torch.jit.script
             def nested(x, y):
-                # type: (int, Tuple[int, int[2]]) -> List[int]
+                # type: (int, Tuple[int, int[2]]) -> List[int]  # noqa: T484
                 return x
 
     def test_ntuple_builtins(self):
@@ -6143,10 +6145,10 @@ a")
 
         @torch.jit.script
        def embedding_norm_script(input, embedding_matrix, max_norm):
-            # type: (Tensor, Tensor, float)
+            # type: (Tensor, Tensor, float) -> None
             F.embedding(input, embedding_matrix, max_norm=0.01)
 
-        for fun in [embedding_norm, embedding_norm_script]:
+        for _ in [embedding_norm, embedding_norm_script]:
             input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
             embedding_matrix = torch.randn(10, 3)
@@ -7327,7 +7329,7 @@ a")
     def test_loop_unrolling_const(self):
         def fn():
             y = 0
-            for i in range(10):
+            for _ in range(10):
                 y += 1
             return y
 
@@ -7349,7 +7351,7 @@ a")
     def test_loop_unrolling_nested(self):
         def fn(x):
             y = 0
-            for i in range(10):
+            for _ in range(10):
                 for j in range(int(x)):
                     y += j
             return y
@@ -7362,7 +7364,7 @@ a")
     def test_loop_unroll_unused_counter(self):
         def fn(x):
             y = 0
-            for i in range(int(x)):
+            for _ in range(int(x)):
                 y += 1
             return y
 
@@ -7373,7 +7375,7 @@ a")
     def test_loop_unroll_negative(self):
         def fn(x):
             y = 0
-            for i in range(int(x)):
+            for _ in range(int(x)):
                 y += 1
             return y
 
@@ -7402,7 +7404,7 @@ a")
             class ReassignSelfLHS(torch.jit.ScriptModule):
                 @torch.jit.script_method
                 def forward(self, x):
-                    for i in range(20):
+                    for _ in range(20):
                         self = x
                     return self
 
@@ -7414,7 +7416,7 @@ a")
             class ReassignSelfRHS(torch.jit.ScriptModule):
                 @torch.jit.script_method
                 def forward(self, x):
-                    for i in range(20):
+                    for _ in range(20):
                         x = self
                     return self
 
@@ -7453,7 +7455,7 @@ a")
         with self.assertRaisesRegex(RuntimeError, r'range\(\) expects 1 argument but got 0'):
             @torch.jit.script
             def range_no_arg(x):
-                for i in range():
+                for _ in range():
                     x += 1
                 return x
 
@@ -9578,7 +9580,7 @@ a")
         with self.assertRaisesRegex(RuntimeError, "from a loop"):
             @torch.jit.script
             def nest_for_ret(x):
-                for i in range(3):
+                for _ in range(3):
                     if bool(x < 3):
                         return 4
                 return 5
@@ -10471,9 +10473,9 @@ def check_against_reference(self, func, reference_func, args, kwargs=None,
     def allSum(vs):
         if isinstance(vs, torch.Tensor):
             vs = (vs,)
-        return sum([(i + 1) * v.sum()
-                    for i, v in enumerate(vs)
-                    if v is not None and v.dtype.is_floating_point])
+        return sum((i + 1) * v.sum()
+                   for i, v in enumerate(vs)
+                   if v is not None and v.dtype.is_floating_point)
 
     def clone_inputs(requires_grad):
         inputs = [
@@ -11717,27 +11719,27 @@ nn_functional_single_grad = frozenset('test_nn_' + name for name in [
 
 # additional modules test
 # TODO: delete this list once we make all nn_tests work
 additional_module_tests = [
-    dict(
-        module_name='Bilinear',
-        constructor_args=(S, S, M),
-        input_size=(S, S),
-        extra_args=((S, S),)
-    ),
-    dict(
-        module_name='RNNCell',
-        constructor_args=(S, S),
-        input_size=(S, S),
-    ),
-    dict(
-        module_name='LSTMCell',
-        constructor_args=(S, S),
-        input_size=(S, S),
-    ),
-    dict(
-        module_name='GRUCell',
-        constructor_args=(S, S),
-        input_size=(S, S),
-    ),
+    {
+        'module_name': 'Bilinear',
+        'constructor_args': (S, S, M),
+        'input_size': (S, S),
+        'extra_args': ((S, S),)
+    },
+    {
+        'module_name': 'RNNCell',
+        'constructor_args': (S, S),
+        'input_size': (S, S),
+    },
+    {
+        'module_name': 'LSTMCell',
+        'constructor_args': (S, S),
+        'input_size': (S, S),
+    },
+    {
+        'module_name': 'GRUCell',
+        'constructor_args': (S, S),
+        'input_size': (S, S),
+    },
 ]
 
@@ -12189,7 +12191,7 @@ class TestAsync(JitTestCase):
                 super(Traced, self).__init__()
 
             def forward(self, x):
-                return tuple([torch.neg(x), x])
+                return (torch.neg(x), x)
 
         class Module(torch.jit.ScriptModule):
             def __init__(self):
@@ -12234,7 +12236,7 @@ class TestAsync(JitTestCase):
         self.assertGraphContainsExactly(module.graph, kind='aten::neg',
                                         num_kind_nodes=3, consider_subgraphs=True)
 
         y = torch.neg(x)
-        self.assertEqual(module(x), tuple([y, y, y, y, x, x]))
+        self.assertEqual(module(x), (y, y, y, y, x, x))
 
     def test_async_script_error(self):
         x = torch.rand(3, 4)