
re-enable tests that pass in test_cuda and test_torch. #194

Merged 8 commits on Sep 14, 2018
118 changes: 51 additions & 67 deletions test/test_cuda.py
@@ -248,60 +248,47 @@ def tmp(t):
# - disable inplace test, if set to True, no inplace test will be done (default=False)
# - decorator, e.g., unittest.skipIf (default is no decorator)
tests = [
('add', small_3d, lambda t: [number(3.14, 3, t)], '', types, False,
"skipIfRocm:ByteTensor,CharTensor,HalfTensor,ShortTensor"),
('add', small_3d, lambda t: [number(3.14, 3, t)]),
('add', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
('add', small_3d, lambda t: [number(0.2, 2, t), small_3d_positive(t)], 'scalar_tensor'),
('sub', small_3d, lambda t: [number(3.14, 3, t)], '', types, False,
"skipIfRocm:ByteTensor,CharTensor,HalfTensor,ShortTensor"),
('sub', small_3d, lambda t: [number(3.14, 3, t)]),
('sub', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
('mul', small_3d, lambda t: [number(3.14, 3, t)], '', types, False,
"skipIfRocm:ByteTensor,CharTensor,HalfTensor,ShortTensor"),
"skipIfRocm:CharTensor,ShortTensor"),
('mul', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
('div', small_3d, lambda t: [number(3.14, 3, t)], '', types, False,
"skipIfRocm:ByteTensor,CharTensor,FloatTensor,HalfTensor,ShortTensor"),
('div', small_3d, lambda t: [number(3.14, 3, t)]),
('div', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
('pow', small_3d, lambda t: [number(3.14, 3, t)], None, float_types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(1., 1, t)], 'pow1', types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(2., 2, t)], 'pow2', types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(3., 3, t)], 'pow3', types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(-1., -1, t)], 'pow-1', float_types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(3.14, 3, t)], None, float_types),
('pow', small_3d, lambda t: [number(1., 1, t)], 'pow1'),
('pow', small_3d, lambda t: [number(2., 2, t)], 'pow2'),
('pow', small_3d, lambda t: [number(3., 3, t)], 'pow3'),
('pow', small_3d, lambda t: [number(-1., -1, t)], 'pow-1', float_types),
# HalfTensor gives bad result at pow-2 with data sampled from torch.randn
('pow', small_3d, lambda t: [number(-2., -2, t)], 'pow-2', float_types_no_half,
False, "skipIfRocm:HalfTensor,FloatTensor"),
('pow', small_3d, lambda t: [tensor_abs_(small_3d(t))], 'tensor', float_types, False, "skipIfRocm:HalfTensor"),
('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)], None, float_types, False, "skipIfRocm:HalfTensor"),
('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars',
types, False, "skipIfRocm:HalfTensor"),
('pow', small_3d, lambda t: [number(-2., -2, t)], 'pow-2', float_types_no_half, False,
"skipIfRocm:FloatTensor"),
('pow', small_3d, lambda t: [tensor_abs_(small_3d(t))], 'tensor', float_types),
('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)], None, float_types),
('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars',
types, False, "skipIfRocm:HalfTensor"),
('addcdiv', small_2d_lapack, lambda t: [tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)], '',
types, False, "skipIfRocm:HalfTensor"),
('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
('addcdiv', small_2d_lapack, lambda t: [tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)]),
('addcdiv', small_2d_lapack, lambda t: [number(2.8, 1, t), tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)],
'scalar', types, False, "skipIfRocm:HalfTensor"),
('addcmul', small_3d, lambda t: [small_3d(t), small_3d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('addcmul', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'two_scalars',
types, False, "skipIfRocm:HalfTensor"),
'scalar'),
('addcmul', small_3d, lambda t: [small_3d(t), small_3d(t)]),
('addcmul', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)]),
('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'scalar'),
('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'two_scalars'),
('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'two_scalars',
types, False, "skipIfRocm:HalfTensor"),
('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('addr', medium_2d, lambda t: [number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'scalar',
types, False, "skipIfRocm:HalfTensor"),
('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'two_scalars',
types, False, "skipIfRocm:HalfTensor"),
('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)]),
('addr', medium_2d, lambda t: [number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'scalar'),
('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'two_scalars'),
('atan2', medium_2d, lambda t: [medium_2d(t)], None, float_types + [torch.HalfTensor]),
('fmod', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
('fmod', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
@@ -313,14 +300,14 @@ def tmp(t):
('clone', medium_2d, lambda t: [],),
('contiguous', medium_2d, lambda t: [],),
('cross', new_t(M, 3, M), lambda t: [new_t(M, 3, M)(t)],),
('cumprod', small_3d, lambda t: [1], '', types, False, "skipIfRocm:HalfTensor"),
('cumprod', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:HalfTensor"),
('cumsum', small_3d, lambda t: [1], '', types, False, "skipIfRocm:HalfTensor"),
('cumsum', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:HalfTensor"),
('cumprod', small_3d, lambda t: [1]),
('cumprod', small_3d, lambda t: [-1], 'neg_dim'),
('cumsum', small_3d, lambda t: [1]),
('cumsum', small_3d, lambda t: [-1], 'neg_dim'),
('dim', small_3d, lambda t: [],),
('dist', small_2d, lambda t: [small_2d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('dist', small_2d, lambda t: [small_2d(t), 3], '3_norm', types, False, "skipIfRocm:HalfTensor"),
('dist', small_2d, lambda t: [small_2d(t), 2.5], '2_5_norm', types, False, "skipIfRocm:HalfTensor"),
('dist', small_2d, lambda t: [small_2d(t)]),
('dist', small_2d, lambda t: [small_2d(t), 3], '3_norm'),
('dist', small_2d, lambda t: [small_2d(t), 2.5], '2_5_norm'),
('dot', medium_1d, lambda t: [medium_1d(t)], '', types, False, "skipIfRocm:HalfTensor"),
('element_size', medium_1d, lambda t: [],),
('eq', small_3d_ones, lambda t: [small_3d(t)],),
@@ -331,7 +318,7 @@ def tmp(t):
('equal', small_3d_ones, lambda t: [small_3d(t)],),
('expand', new_t(M, 1, M), lambda t: [M, 4, M],),
('expand_as', new_t(M, 1, M), lambda t: [new_t(M, 4, M)(t)],),
('fill', medium_2d, lambda t: [number(3.14, 3, t)], '', types, False, "skipIfRocm:HalfTensor"),
('fill', medium_2d, lambda t: [number(3.14, 3, t)]),
('ge', medium_2d, lambda t: [medium_2d(t)],),
('le', medium_2d, lambda t: [medium_2d(t)],),
('gt', medium_2d, lambda t: [medium_2d(t)],),
@@ -345,33 +332,31 @@ def tmp(t):
('kthvalue', small_3d_unique, lambda t: [3],),
('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
('kthvalue', small_3d_unique, lambda t: [3, -1], 'neg_dim'),
('lerp', small_3d, lambda t: [small_3d(t), 0.3], '', types, False, "skipIfRocm:HalfTensor"),
('max', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('lerp', small_3d, lambda t: [small_3d(t), 0.3]),
('max', small_3d_unique, lambda t: []),
('max', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
('min', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('min', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
('mean', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
('mean', small_3d, lambda t: []),
('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor"),
('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor"),
('mode', small_3d, lambda t: [], '', types, False, skipIfRocm),
('mode', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half,
False, "skipIfRocm:DoubleTensor,FloatTensor"),
('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half,
False, "skipIfRocm:DoubleTensor,FloatTensor"),
('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half),
('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half),
('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types),
('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
('remainder', small_3d, lambda t: [constant_tensor_sub(0, small_3d_positive(t))], 'negative_tensor', signed_types),
('std', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('std', small_3d, lambda t: []),
('std', small_3d, lambda t: [1], 'dim'),
('std', small_3d, lambda t: [-1], 'neg_dim'),
('var', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('var', small_3d, lambda t: []),
('var', small_3d, lambda t: [1], 'dim'),
('var', small_3d, lambda t: [-1], 'neg_dim'),
('ndimension', small_3d, lambda t: [],),
@@ -380,20 +365,19 @@ def tmp(t):
('narrow', small_3d, lambda t: [1, 3, 2],),
('narrow', small_3d, lambda t: [-1, 3, 2], 'neg_dim'),
('nonzero', small_3d, lambda t: [], '', types, False, skipIfRocm),
('norm', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('norm', small_3d, lambda t: [3], '3_norm', types, False, "skipIfRocm:HalfTensor"),
('norm', small_3d, lambda t: [3, 0], '3_norm_dim', types, False, "skipIfRocm:HalfTensor,DoubleTensor,FloatTensor"),
('norm', small_3d, lambda t: [3, -2], '3_norm_neg_dim', types,
False, "skipIfRocm:HalfTensor,DoubleTensor,FloatTensor"),
('norm', small_3d, lambda t: []),
('norm', small_3d, lambda t: [3], '3_norm'),
('norm', small_3d, lambda t: [3, 0], '3_norm_dim'),
('norm', small_3d, lambda t: [3, -2], '3_norm_neg_dim'),
('ones', small_3d, lambda t: [1, 2, 3, 4, 5],),
('permute', new_t(1, 2, 3, 4), lambda t: [2, 1, 3, 0],),
('put_', new_t(2, 5, 3), lambda t: [long_type(t)([[0], [-2]]), t([[3], [4]])], '', types, False, skipIfRocm),
('put_', new_t(2, 3), lambda t: [long_type(t)([]), t([])], 'empty'),
('put_', new_t(2, 2), lambda t: [long_type(t)([[1], [-3]]), t([[1], [2]]), True], 'accumulate'),
('prod', small_2d_oneish, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('prod', small_2d_oneish, lambda t: []),
('prod', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
('prod', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('sum', small_2d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('sum', small_2d, lambda t: []),
('sum', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
('sum', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
('renorm', small_3d, lambda t: [2, 1, 1], '2_norm', types, False, "skipIfRocm:HalfTensor,DoubleTensor,FloatTensor"),
@@ -424,7 +408,7 @@ def tmp(t):
('topk', small_3d_unique, lambda t: [2, 1, False, True], 'dim_sort', types, False, skipIfRocm),
('topk', small_3d_unique, lambda t: [2, -1, False, True], 'neg_dim_sort', types, False, skipIfRocm),
('topk', small_3d_unique, lambda t: [2, 1, True, True], 'dim_desc_sort', types, False, skipIfRocm),
('trace', medium_2d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
('trace', medium_2d, lambda t: []),
('tril', medium_2d, lambda t: [],),
('tril', medium_2d_expanded, lambda t: [], 'zero_stride', types, True),
('tril', medium_2d, lambda t: [2], 'positive'),
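
For context, each entry in the tests table modified above follows the schema spelled out in the inline comments at the top of the hunk: (op name, input constructor, argument constructor, optional subtest name, tensor types, inplace-test flag, decorator). The sketch below is a minimal, hypothetical illustration of how a table-driven harness could expand one such entry into a CPU-vs-CUDA comparison test; the names make_test and TestCudaGenerated are illustrative only and are not part of the actual PyTorch test suite.

import unittest

import torch


def make_test(op_name, tensor_ctor, arg_ctor, decorator=None):
    # Generated test: run the op on CPU and on GPU and compare the results.
    @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
    def test(self):
        t = tensor_ctor(torch.FloatTensor)
        args = arg_ctor(torch.FloatTensor)
        cpu_result = getattr(t, op_name)(*args)
        gpu_args = [a.cuda() if torch.is_tensor(a) else a for a in args]
        gpu_result = getattr(t.cuda(), op_name)(*gpu_args)
        self.assertTrue(torch.allclose(cpu_result, gpu_result.cpu(), atol=1e-5))
    if decorator is not None:
        # In the real table the decorator slot can also hold a
        # "skipIfRocm:..." string; this sketch only handles a plain callable.
        test = decorator(test)
    return test


class TestCudaGenerated(unittest.TestCase):
    pass


# Register one generated test from a single table entry.
small_3d = lambda t: t(3, 3, 3).uniform_()
TestCudaGenerated.test_add = make_test('add', small_3d, lambda t: [3.14])

if __name__ == '__main__':
    unittest.main()
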
7 changes: 1 addition & 6 deletions test/test_torch.py
@@ -1078,7 +1078,6 @@ def test_pairwise_distance_empty(self):
self.assertEqual(torch.zeros(0, device=device), torch.pairwise_distance(x, y))
self.assertEqual(torch.zeros((0, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))

@skipIfRocm
def test_pdist_empty(self):
devices = ['cpu']
for device in devices:
@@ -1094,7 +1093,6 @@ def test_pdist_empty(self):
x = torch.randn(shape, device=device)
self.assertEqual(torch.zeros(3, device=device), torch.pdist(x))

@skipIfRocm
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_pdist_scipy(self):
from scipy.spatial.distance import pdist
@@ -2277,7 +2275,6 @@ def get_int64_dtype(dtype):
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)

@skipIfRocm
def test_empty_full(self):
self._test_empty_full(self, torch.testing.get_all_dtypes(), torch.strided, torch.device('cpu'))
if torch.cuda.device_count() > 0:
@@ -3479,6 +3476,7 @@ def test_topk_arguments(self):
self.assertRaises(TypeError, lambda: q.topk(4, True))

@unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
@skipIfRocm
def test_topk_noncontiguous_gpu(self):
t = torch.randn(20, device="cuda")[::2]
top1, idx1 = t.topk(5)
@@ -3731,7 +3729,6 @@ def test_narrow(self):
self.assertEqual(x.narrow(-1, -1, 1), torch.Tensor([[2], [5], [8]]))
self.assertEqual(x.narrow(-2, -1, 1), torch.Tensor([[6, 7, 8]]))

@skipIfRocm
def test_narrow_empty(self):
devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
for device in devices:
@@ -4022,7 +4019,6 @@ def _test_gesv_batched_dims(self, cast):
self.assertEqual(x.data, cast(x_exp))

@skipIfNoLapack
@skipIfRocm
def test_gesv_batched_dims(self):
self._test_gesv_batched_dims(self, lambda t: t)

@@ -7496,7 +7492,6 @@ def test_serialize_device(self):
self.assertEqual(device, device_copied)

@unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
@skipIfRocm
def test_half_tensor_cuda(self):
x = torch.randn(5, 5).half()
self.assertEqual(x.cuda(), x)