Commit 1f3544f

Merge pull request #103 from jithunnair-amd/skipIfRocm_refactor
Refactor unit test skip statements to use @skipIfRocm annotation
2 parents dee2f5b + d963986 · commit 1f3544f

File tree: 6 files changed, +54 −54 lines changed
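
For reference, the refactor replaces each repeated @unittest.skipIf(TEST_WITH_ROCM, ...) line with a single @skipIfRocm decorator imported from test/common.py. The helper itself is not part of this diff, so the sketch below is only an assumed minimal implementation: it wraps unittest.skipIf so the ROCm flag and the skip message live in one place. The environment variable name used here is also an assumption.

    # Hypothetical sketch of the skipIfRocm helper assumed to live in
    # test/common.py; the actual implementation is not shown in this commit.
    import os
    import unittest

    # Assumed environment flag; the real name may differ.
    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        """Skip the decorated test when running on the ROCm stack."""
        return unittest.skipIf(
            TEST_WITH_ROCM,
            "test doesn't currently work on the ROCm stack")(fn)

With a helper like this in place, a test only needs the one-line @skipIfRocm annotation instead of repeating the condition and reason at every call site, which is exactly what the diffs below do.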

test/test_autograd.py
Lines changed: 12 additions & 12 deletions

@@ -15,7 +15,7 @@
 from torch.autograd.function import once_differentiable
 from torch.autograd.profiler import profile
 from common import TEST_MKL, TestCase, run_tests, skipIfNoLapack, \
-    suppress_warnings, TEST_WITH_ROCM, skipIfRocm
+    suppress_warnings, skipIfRocm
 from torch.autograd import Variable, Function, detect_anomaly
 from torch.autograd.function import InplaceFunction
 from torch.testing import make_non_contiguous, randn_like

@@ -975,7 +975,7 @@ def test_no_requires_grad_inplace(self):
         with self.assertRaises(RuntimeError):
             b.add_(5)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_requires_grad_factory(self):
         x = torch.randn(2, 3)
         fns = [torch.ones_like, torch.testing.randn_like]

@@ -1375,7 +1375,7 @@ def __del__(self):
         Variable(torch.randn(10, 10), _grad_fn=CollectOnDelete())

     @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_unused_output_gpu(self):
         from torch.nn.parallel._functions import Broadcast
         x = Variable(torch.randn(5, 5).float().cuda(), requires_grad=True)

@@ -1404,7 +1404,7 @@ def backward(ctx, grad_output):
         self.assertEqual(device[0], 1)

     @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_inputbuffer_add_multigpu(self):
         input = torch.randn(1).cuda(0).requires_grad_()
         output = input.cuda(1) + input.cuda(1)

@@ -1454,7 +1454,7 @@ def test_detach_base(self):
         self.assertIsNotNone(view.grad_fn)
         self.assertIs(view._base, x)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def _test_type_conversion_backward(self, t, ):
         fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
         fvar.double().sum().backward()

@@ -1578,7 +1578,7 @@ def test_pyscalar_conversions(self):
         self._test_pyscalar_conversions(lambda x: x.cuda(), lambda x: long(x))

     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pin_memory(self):
         x = torch.randn(2, 2, requires_grad=True)
         self.assertEqual(x, x.pin_memory())

@@ -1914,7 +1914,7 @@ def test_cat_empty(self):
             lambda a, b: torch.cat((a, b)),
             True, f_args_variable, f_args_tensor)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_potrf(self):
         root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

@@ -2074,7 +2074,7 @@ def run_test(input_size, exponent):
         run_test((10, 10), torch.zeros(10, 10))
         run_test((10,), 0)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pinverse(self):
         # Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
         # 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable

@@ -2187,7 +2187,7 @@ def test_where_functional(self):
         self._test_where_functional(lambda t: t)

     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_where_functional_cuda(self):
         self._test_where_functional(lambda t: t.cuda())

@@ -2397,15 +2397,15 @@ def f3(dt):
             f(dt)

     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_set_requires_grad_only_for_floats_cuda(self):
         self._test_set_requires_grad_only_for_floats(self, True)

     def test_set_requires_grad_only_for_floats(self):
         self._test_set_requires_grad_only_for_floats(self, False)

     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rnn_backward_to_input_but_not_parameters_cuda(self):
         # this checks whether it is possible to not require
         # weight parameters, but require inputs, see #7722

@@ -2457,7 +2457,7 @@ def backward(ctx, gO):
         out.backward()
         self.assertIn('MyFunc.apply', str(w[0].message))

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_symeig_no_eigenvectors(self):
         A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
         w, v = torch.symeig(A, eigenvectors=False)

test/test_dataloader.py
Lines changed: 9 additions & 9 deletions

@@ -13,7 +13,7 @@
 from torch.utils.data import Dataset, TensorDataset, DataLoader, ConcatDataset
 from torch.utils.data.dataset import random_split
 from torch.utils.data.dataloader import default_collate, ExceptionWrapper, MANAGER_STATUS_CHECK_INTERVAL
-from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ROCM
+from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm

 # We cannot import TEST_CUDA from common_nn here, because if we do that,
 # the TEST_CUDNN line from common_nn will be executed multiple times

@@ -335,14 +335,14 @@ def test_growing_dataset(self):
         self.assertEqual(len(dataloader_shuffle), 5)

     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_sequential_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
         for input, target in loader:
             self.assertTrue(input.is_pinned())
             self.assertTrue(target.is_pinned())

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_multiple_dataloaders(self):
         loader1_it = iter(DataLoader(self.dataset, num_workers=1))
         loader2_it = iter(DataLoader(self.dataset, num_workers=2))

@@ -443,7 +443,7 @@ def test_batch_sampler(self):
         self._test_batch_sampler(num_workers=4)

     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
         for input, target in loader:

@@ -476,7 +476,7 @@ def test_error_workers(self):

     @unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_partial_workers(self):
         "check that workers exit even if the iterator is not exhausted"
         loader = iter(DataLoader(self.dataset, batch_size=2, num_workers=4, pin_memory=True))

@@ -530,7 +530,7 @@ def _is_process_alive(pid, pname):
                     "spawn start method is not supported in Python 2, \
                     but we need it for creating another process with CUDA")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_main_process_unclean_exit(self):
         '''There might be ConnectionResetError or leaked semaphore warning (due to dirty process exit), \
         but they are all safe to ignore'''

@@ -634,7 +634,7 @@ def setUp(self):
         self.dataset = StringDataset()

     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
         for batch_ndx, (s, n) in enumerate(loader):

@@ -678,7 +678,7 @@ def test_sequential_batch(self):
             self.assertEqual(n[1], idx + 1)

     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
         for batch_ndx, sample in enumerate(loader):

@@ -718,7 +718,7 @@ def _run_ind_worker_queue_test(self, batch_size, num_workers):
             if current_worker_idx == num_workers:
                 current_worker_idx = 0

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_ind_worker_queue(self):
         for batch_size in (8, 16, 32, 64):
             for num_workers in range(1, 6):

test/test_jit.py
Lines changed: 13 additions & 13 deletions

@@ -9,7 +9,7 @@
 from torch.autograd.function import traceable
 from torch.testing import assert_allclose
 from torch.onnx import OperatorExportTypes
-from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, TEST_WITH_ROCM
+from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, skipIfRocm
 from textwrap import dedent
 import os
 import io

@@ -401,7 +401,7 @@ def forward(self, x):
     # TODO: Fuser doesn't work at all when inputs require grad. Fix that
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lstm_fusion_cuda(self):
         inputs = get_lstm_inputs('cuda')
         ge = self.checkTrace(LSTMCellF, inputs)

@@ -425,15 +425,15 @@ def test_lstm_fusion_cpu(self):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lstm_fusion_concat(self):
         inputs = get_lstm_inputs('cuda')
         ge = self.checkTrace(LSTMCellC, inputs)
         self.assertExpectedGraph(ge.graph_for(*inputs))

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_concat_fusion(self):
         hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
         cx = torch.randn(3, 20, dtype=torch.float, device='cuda')

@@ -446,7 +446,7 @@ def foo(hx, cx):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_fusion_distribute(self):
         def f(x, y):
             z1, z2 = (x + y).chunk(2, dim=1)

@@ -491,7 +491,7 @@ def fn_test_comparison_gt_lt(x, y):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_comparison_gt_lt(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')

@@ -500,7 +500,7 @@ def test_comparison_gt_lt(self):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_comparison_ge_le(self):
         def f(x, y):
             mask = (x >= 0).type_as(x)

@@ -520,7 +520,7 @@ def fn_test_relu(x, y):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_relu(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')

@@ -543,7 +543,7 @@ def fn_test_exp(x, y):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_exp(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')

@@ -888,7 +888,7 @@ def doit(x, y):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_cpp(self):
         # rather than rebuild assertExpected in cpp,
         # just glob all the cpp outputs into one file for now

@@ -1008,7 +1008,7 @@ def test_ge_optimized(self):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_ge_cuda(self):
         self.run_ge_tests(True, True)

@@ -1045,7 +1045,7 @@ def foo(a):

     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "calls .cuda()")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_traced_module(self):
         class Model(nn.Module):
             def __init__(self, num_features, num_layers):

@@ -2804,7 +2804,7 @@ def test_tensor_number_math(self):
         self._test_tensor_number_math()

     @unittest.skipIf(not RUN_CUDA, "No CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_tensor_number_math_cuda(self):
         self._test_tensor_number_math(device='cuda')

test/test_optim.py
Lines changed: 10 additions & 10 deletions

@@ -11,7 +11,7 @@
 from torch.autograd import Variable
 from torch import sparse
 from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
-from common import TestCase, run_tests, TEST_WITH_UBSAN, TEST_WITH_ROCM
+from common import TestCase, run_tests, TEST_WITH_UBSAN, skipIfRocm


 def rosenbrock(tensor):

@@ -236,7 +236,7 @@ def _build_params_dict(self, weight, bias, **kwargs):
     def _build_params_dict_single(self, weight, bias, **kwargs):
         return [dict(params=bias, **kwargs)]

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_sgd(self):
         self._test_rosenbrock(
             lambda params: optim.SGD(params, lr=1e-3),

@@ -273,7 +273,7 @@ def test_sgd_sparse(self):
             lambda params: optim.SGD(params, lr=5e-3)
         )

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adam(self):
         self._test_rosenbrock(
             lambda params: optim.Adam(params, lr=1e-2),

@@ -311,7 +311,7 @@ def test_sparse_adam(self):
         with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
             optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adadelta(self):
         self._test_rosenbrock(
             lambda params: optim.Adadelta(params),

@@ -335,7 +335,7 @@ def test_adadelta(self):
         with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
             optim.Adadelta(None, lr=1e-2, rho=1.1)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adagrad(self):
         self._test_rosenbrock(
             lambda params: optim.Adagrad(params, lr=1e-1),

@@ -369,7 +369,7 @@ def test_adagrad_sparse(self):
             lambda params: optim.Adagrad(params, lr=1e-1)
         )

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adamax(self):
         self._test_rosenbrock(
             lambda params: optim.Adamax(params, lr=1e-1),

@@ -394,7 +394,7 @@ def test_adamax(self):
         with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 1: 1.0"):
             optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rmsprop(self):
         self._test_rosenbrock(
             lambda params: optim.RMSprop(params, lr=1e-2),

@@ -419,7 +419,7 @@ def test_rmsprop(self):
         with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
             optim.RMSprop(None, lr=1e-2, momentum=-1.0)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_asgd(self):
         self._test_rosenbrock(
             lambda params: optim.ASGD(params, lr=1e-3),

@@ -444,7 +444,7 @@ def test_asgd(self):
         with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -0.5"):
             optim.ASGD(None, lr=1e-2, weight_decay=-0.5)

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rprop(self):
         self._test_rosenbrock(
             lambda params: optim.Rprop(params, lr=1e-3),

@@ -469,7 +469,7 @@ def test_rprop(self):
         with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
             optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5))

-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lbfgs(self):
         self._test_rosenbrock(
             lambda params: optim.LBFGS(params),
