From 3f436fb1fe1a839f6f5d981e3ec626cd840045df Mon Sep 17 00:00:00 2001 From: Leonard Lausen Date: Wed, 21 Oct 2020 21:15:28 -0600 Subject: [PATCH] Remove duplicate @with_seed decorators (#19336) * Remove duplicate @with_seed decorators The function_scope_seed pytest autouse fixture defined in conftest.py already manages the seeds of every test function. * Fix * Remove @with_seed from newly introduced tests --- conftest.py | 13 +- tests/nightly/common.py | 100 ++++++++++ tests/nightly/estimator/test_estimator_cnn.py | 7 +- tests/nightly/estimator/test_sentiment_rnn.py | 7 +- tests/nightly/test_large_array.py | 3 - tests/nightly/test_large_vector.py | 3 - tests/nightly/test_np_large_array.py | 1 - tests/nightly/test_np_random.py | 11 +- tests/python/gpu/test_amp.py | 4 +- tests/python/gpu/test_fusion.py | 7 - tests/python/gpu/test_gluon_gpu.py | 19 +- tests/python/gpu/test_gluon_model_zoo_gpu.py | 3 - tests/python/gpu/test_gluon_transforms.py | 6 +- tests/python/gpu/test_kvstore_gpu.py | 2 - tests/python/gpu/test_numpy_fallback.py | 3 +- tests/python/gpu/test_numpy_op.py | 3 +- tests/python/gpu/test_operator_gpu.py | 61 +----- tests/python/mkl/test_amp.py | 2 - tests/python/mkl/test_bf16_operator.py | 12 -- tests/python/mkl/test_mkldnn.py | 24 +-- tests/python/mkl/test_subgraph.py | 1 - tests/python/unittest/common.py | 76 -------- .../python/unittest/onnx/mxnet_export_test.py | 7 - tests/python/unittest/test_autograd.py | 20 +- .../unittest/test_contrib_control_flow.py | 12 -- .../test_contrib_gluon_data_vision.py | 4 - tests/python/unittest/test_contrib_intgemm.py | 5 - .../python/unittest/test_contrib_operator.py | 5 +- .../python/unittest/test_contrib_optimizer.py | 3 +- tests/python/unittest/test_contrib_stes_op.py | 4 +- tests/python/unittest/test_dynamic_shape.py | 1 - tests/python/unittest/test_exc_handling.py | 12 -- tests/python/unittest/test_executor.py | 5 - tests/python/unittest/test_gluon.py | 121 +----------- tests/python/unittest/test_gluon_data.py | 18 -- .../python/unittest/test_gluon_data_vision.py | 21 +-- tests/python/unittest/test_gluon_model_zoo.py | 3 - .../unittest/test_gluon_probability_v1.py | 35 ---- .../unittest/test_gluon_probability_v2.py | 34 ---- tests/python/unittest/test_gluon_rnn.py | 16 +- tests/python/unittest/test_gluon_trainer.py | 13 +- .../python/unittest/test_higher_order_grad.py | 33 +--- tests/python/unittest/test_image.py | 18 +- tests/python/unittest/test_infer_type.py | 4 +- tests/python/unittest/test_kvstore.py | 12 +- tests/python/unittest/test_kvstore_custom.py | 9 +- tests/python/unittest/test_loss.py | 7 +- tests/python/unittest/test_metric.py | 2 +- tests/python/unittest/test_ndarray.py | 57 +----- .../test_numpy_contrib_gluon_data_vision.py | 5 - .../unittest/test_numpy_default_dtype.py | 2 - tests/python/unittest/test_numpy_gluon.py | 16 -- .../unittest/test_numpy_gluon_data_vision.py | 22 +-- .../unittest/test_numpy_interoperability.py | 6 +- tests/python/unittest/test_numpy_loss.py | 7 +- tests/python/unittest/test_numpy_ndarray.py | 25 +-- tests/python/unittest/test_numpy_op.py | 165 +--------------- tests/python/unittest/test_operator.py | 176 +----------------- tests/python/unittest/test_optimizer.py | 25 +-- tests/python/unittest/test_random.py | 26 +-- tests/python/unittest/test_recordio.py | 4 - tests/python/unittest/test_sparse_ndarray.py | 41 +--- tests/python/unittest/test_sparse_operator.py | 25 +-- tests/python/unittest/test_subgraph.py | 2 - tests/python/unittest/test_tvm_op.py | 2 - 65 files changed, 158 insertions(+), 1240 deletions(-) create mode 100644 tests/nightly/common.py
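For context on the mechanism named in the commit message: a pytest fixture declared with autouse=True in conftest.py runs around every test function in its scope, so RNG seeding no longer needs a per-test decorator. Below is a minimal sketch of such a seeding fixture, not the verbatim conftest.py code; the real function_scope_seed fixture (partially visible in the conftest.py hunk that follows) additionally inspects pytest's setup/call reports to decide what to log on failure.

import os
import random

import pytest


@pytest.fixture(autouse=True)
def function_scope_seed():
    # Minimal sketch, assuming this shape for the fixture: seed the
    # np/mx/python RNGs identically before each test, honoring
    # MXNET_TEST_SEED for reproduction, and restore python's RNG state after.
    import numpy as np
    import mxnet as mx

    env_seed = os.getenv('MXNET_TEST_SEED')
    seed = int(env_seed) if env_seed is not None else np.random.randint(0, np.iinfo(np.int32).max)

    old_state = random.getstate()
    np.random.seed(seed)
    mx.random.seed(seed)
    random.seed(seed)

    yield  # run the test

    random.setstate(old_state)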
diff --git a/conftest.py b/conftest.py index 91b34128c5b5..c8814c4d8bcc 100644 --- a/conftest.py +++ b/conftest.py @@ -204,7 +204,7 @@ def test_not_ok_with_random_data(): except: logging.warning('Unable to import numpy/mxnet. Skip setting function-level seed.') - seed_message = 'np/mx/python random seeds are set to {}, use MXNET_TEST_SEED={} to reproduce.' + seed_message = 'Setting np/mx/python random seeds to {}. Use MXNET_TEST_SEED={} to reproduce.' seed_message = seed_message.format(seed, seed) # Always log seed on DEBUG log level. This makes sure we can find out the @@ -215,12 +215,13 @@ def test_not_ok_with_random_data(): yield # run the test if request.node.rep_setup.failed: - logging.info("Setting up a test failed: {}", request.node.nodeid) + logging.error("Setting up a test failed: {}", request.node.nodeid) elif request.node.rep_call.outcome == 'failed': - # Either request.node.rep_setup.failed or request.node.rep_setup.passed - # should be True + # Either request.node.rep_setup.failed or request.node.rep_setup.passed should be True assert request.node.rep_setup.passed - # On failure also log seed on INFO log level - logging.info(seed_message) + # On failure also log seed on WARNING log level + error_message = 'Error seen with seeded test, use MXNET_TEST_SEED={} to reproduce' + error_message = error_message.format(seed) + logging.warning(error_message) random.setstate(old_state) diff --git a/tests/nightly/common.py b/tests/nightly/common.py new file mode 100644 index 000000000000..3f9e2642dbe2 --- /dev/null +++ b/tests/nightly/common.py @@ -0,0 +1,100 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import functools +import logging +import os +import random + +import mxnet as mx +import numpy as np + + +def with_seed(seed=None): + """ + A decorator for test functions that manages rng seeds. + + Parameters + ---------- + + seed : the seed to pass to np.random and mx.random + + + This test decorator sets the np, mx and python random seeds identically + prior to each test, then outputs those seeds if the test fails or + if the test requires a fixed seed (as a reminder to make the test + more robust against random data). + + @with_seed() + def test_ok_with_random_data(): + ... + + @with_seed(1234) + def test_not_ok_with_random_data(): + ... + + Use of the @with_seed() decorator for all tests creates + test isolation and reproducibility of failures. When a + test fails, the decorator outputs the seed used. The user + can then set the environment variable MXNET_TEST_SEED to + the value reported, then rerun the test with: + + pytest --verbose --capture=no <test_module_name.py>::<test_name> + + To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
+ To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest. + """ + def test_helper(orig_test): + @functools.wraps(orig_test) + def test_new(*args, **kwargs): + test_count = int(os.getenv('MXNET_TEST_COUNT', '1')) + env_seed_str = os.getenv('MXNET_TEST_SEED') + for i in range(test_count): + if seed is not None: + this_test_seed = seed + log_level = logging.INFO + elif env_seed_str is not None: + this_test_seed = int(env_seed_str) + log_level = logging.INFO + else: + this_test_seed = np.random.randint(0, np.iinfo(np.int32).max) + log_level = logging.DEBUG + post_test_state = np.random.get_state() + np.random.seed(this_test_seed) + mx.random.seed(this_test_seed) + random.seed(this_test_seed) + # 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump. + test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else '' + pre_test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}' + ' to reproduce.').format(test_count_msg, this_test_seed) + on_err_test_msg = ('{}Error seen with seeded test, use MXNET_TEST_SEED={}' + ' to reproduce.').format(test_count_msg, this_test_seed) + logging.log(log_level, pre_test_msg) + try: + orig_test(*args, **kwargs) + except: + # With exceptions, repeat test_msg at WARNING level to be sure it's seen. + if log_level < logging.WARNING: + logging.warning(on_err_test_msg) + raise + finally: + # Provide test-isolation for any test having this decorator + mx.nd.waitall() + np.random.set_state(post_test_state) + return test_new + return test_helper + diff --git a/tests/nightly/estimator/test_estimator_cnn.py b/tests/nightly/estimator/test_estimator_cnn.py index b3b0d536af24..199f191bedd0 100644 --- a/tests/nightly/estimator/test_estimator_cnn.py +++ b/tests/nightly/estimator/test_estimator_cnn.py @@ -27,9 +27,6 @@ from mxnet.gluon.contrib.estimator import estimator from mxnet.gluon.model_zoo import vision -# use with_seed decorator in python/unittest/common.py -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'python', 'unittest')) -from common import with_seed import pytest @@ -93,7 +90,6 @@ def get_net(model_name, context): return net, input_shape, label_shape, loss_axis -@with_seed() def test_estimator_cpu(): ''' Test estimator by doing one pass over each model with synthetic data @@ -125,8 +121,7 @@ def test_estimator_cpu(): epochs=1) -# using fixed seed to reduce flakiness in accuracy assertion -@with_seed(7) +@pytest.mark.seed(7) # using fixed seed to reduce flakiness in accuracy assertion @pytest.mark.skipif(mx.context.num_gpus() < 1, reason="skip if no GPU") def test_estimator_gpu(): ''' diff --git a/tests/nightly/estimator/test_sentiment_rnn.py b/tests/nightly/estimator/test_sentiment_rnn.py index 30f5114b2c10..12b993c90333 100644 --- a/tests/nightly/estimator/test_sentiment_rnn.py +++ b/tests/nightly/estimator/test_sentiment_rnn.py @@ -32,9 +32,6 @@ from mxnet.gluon import nn, rnn from mxnet.gluon.contrib.estimator import estimator -# use with_seed decorator in python/unittest/common.py -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'python', 'unittest')) -from common import with_seed import pytest @@ -205,7 +202,6 @@ def run(net, train_dataloader, test_dataloader, num_epochs, ctx, lr): return acc -@with_seed() def test_estimator_cpu(): ''' Test estimator by doing one pass over each model with synthetic data @@ -241,8 +237,7 @@ def test_estimator_cpu(): run(net, train_dataloader, val_dataloader, num_epochs=num_epochs, ctx=ctx, lr=lr) -# 
using fixed seed to reduce flakiness in accuracy assertion -@with_seed(7) +@pytest.mark.seed(7) # using fixed seed to reduce flakiness in accuracy assertion @pytest.mark.skipif(mx.context.num_gpus() < 1, reason="skip if no GPU") def test_estimator_gpu(): ''' diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 3bd7ac6524ff..841a0b905e2d 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -22,9 +22,6 @@ import numpy as np import mxnet as mx -curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) -sys.path.append(os.path.join(curr_path, '../python/unittest/')) - from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor from mxnet import gluon, nd from common import with_seed diff --git a/tests/nightly/test_large_vector.py b/tests/nightly/test_large_vector.py index e95b411974b2..01d75286b9e6 100644 --- a/tests/nightly/test_large_vector.py +++ b/tests/nightly/test_large_vector.py @@ -22,9 +22,6 @@ import numpy as np import mxnet as mx -curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) -sys.path.append(os.path.join(curr_path, '../python/unittest/')) - from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector from mxnet import gluon, nd from common import with_seed diff --git a/tests/nightly/test_np_large_array.py b/tests/nightly/test_np_large_array.py index 4721688e33a8..d9c99d965e3c 100644 --- a/tests/nightly/test_np_large_array.py +++ b/tests/nightly/test_np_large_array.py @@ -27,7 +27,6 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_np_tensor, use_np from mxnet import gluon, np, npx -from common import with_seed import pytest from tests.python.unittest.common import assertRaises from mxnet.base import MXNetError diff --git a/tests/nightly/test_np_random.py b/tests/nightly/test_np_random.py index 0f7ca9c7d681..ccb74e16e6c9 100644 --- a/tests/nightly/test_np_random.py +++ b/tests/nightly/test_np_random.py @@ -16,8 +16,6 @@ # under the License. 
# pylint: skip-file -from __future__ import absolute_import -from __future__ import division import itertools import os import sys @@ -32,7 +30,7 @@ from mxnet import np, npx, autograd from mxnet.gluon import HybridBlock from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, use_np -from common import with_seed, retry +from common import retry from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like from mxnet.ndarray.ndarray import py_slice from mxnet.base import integer_types @@ -40,7 +38,6 @@ @retry(5) -@with_seed() @use_np def test_np_exponential(): samples = 1000000 @@ -56,7 +53,6 @@ def test_np_exponential(): @retry(5) -@with_seed() @use_np def test_np_uniform(): types = [None, "float32", "float64"] @@ -76,7 +72,6 @@ def test_np_uniform(): @retry(5) -@with_seed() @use_np def test_np_logistic(): samples = 1000000 @@ -93,7 +88,6 @@ def test_np_logistic(): @retry(5) -@with_seed() @use_np def test_np_gumbel(): samples = 1000000 @@ -109,7 +103,6 @@ def test_np_gumbel(): @retry(5) -@with_seed() @use_np def test_np_normal(): types = [None, "float32", "float64"] @@ -129,7 +122,6 @@ def test_np_normal(): @retry(5) -@with_seed() @use_np def test_np_gamma(): types = [None, "float32", "float64"] @@ -156,7 +148,6 @@ def generator_mx(x): return np.random.gamma( @retry(5) -@with_seed() @use_np def test_np_laplace(): types = [None, "float32", "float64"] diff --git a/tests/python/gpu/test_amp.py b/tests/python/gpu/test_amp.py index 237cb1ba29c8..89687bfcf823 100644 --- a/tests/python/gpu/test_amp.py +++ b/tests/python/gpu/test_amp.py @@ -31,7 +31,7 @@ from mxnet.operator import get_all_registered_operators_grouped curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed, assert_raises_cudnn_not_satisfied +from common import assert_raises_cudnn_not_satisfied sys.path.insert(0, os.path.join(curr_path, '../train')) set_default_context(mx.gpu(0)) @@ -96,7 +96,6 @@ def test_amp_coverage(amp_tests): f"python/mxnet/amp/lists/symbol_fp16.py) - please add them. " \ f"\n{guidelines}" -@with_seed() @pytest.mark.skip(reason='Error during waitall(). Tracked in #18099') @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_amp_conversion_rnn(amp_tests): @@ -112,7 +111,6 @@ def test_amp_conversion_rnn(amp_tests): mx.test_utils.assert_almost_equal(out.asnumpy(), out2.asnumpy(), atol=1e-2, rtol=1e-2) -@with_seed() def test_fp16_casting(amp_tests): data = mx.sym.var("data") out1 = mx.sym.amp_cast(data, dtype="float16") diff --git a/tests/python/gpu/test_fusion.py b/tests/python/gpu/test_fusion.py index 8594a49032e3..4bb3a862144a 100644 --- a/tests/python/gpu/test_fusion.py +++ b/tests/python/gpu/test_fusion.py @@ -26,7 +26,6 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed def check_fused_symbol(sym, **kwargs): inputs = sym.list_inputs() @@ -250,14 +249,12 @@ def check_leakyrelu_ops(): check_fused_symbol(mx.sym.LeakyReLU(a+b, act_type='gelu'), a=arr1, b=arr2) -@with_seed() def test_fusion(): check_unary_ops() check_binary_ops() check_other_ops() check_leakyrelu_ops() -@with_seed() def test_fusion_compiler_cache(): # Stresses the internal cache of CUfunctions by creating the same kernel multiple times and # on multiple GPUs if available. 
@@ -276,7 +273,6 @@ def test_fusion_compiler_cache(): if num_gpus > 1: check_fused_symbol(a+b, ctx=mx.gpu(1), a=arr1, b=arr2) -@with_seed() @use_np def test_fusion_boolean_inputs(): from mxnet.gluon import HybridBlock @@ -296,7 +292,6 @@ def hybrid_forward(self, F, valid_length): out = foo(mx.np.ones((10,), ctx=mx.gpu(), dtype=np.bool)) mx.npx.waitall() -@with_seed() def test_fusion_different_dimensions(): from mxnet.gluon import HybridBlock @@ -320,7 +315,6 @@ def hybrid_forward(self, F, x): assert np.all(out.asnumpy() == np.ones((10,10))) assert out.shape == (10,10,1) -@with_seed() def test_input_reorder(): class Block(gluon.HybridBlock): def __init__(self, **kwargs): @@ -354,7 +348,6 @@ def hybrid_forward(self, F, x, y, z): for key in ['result'] + list(range(len(arg_data))): assert_allclose(arrays['0'][key].asnumpy(), arrays['1'][key].asnumpy()) -@with_seed() def test_fusion_cycle(): class Test(gluon.nn.HybridBlock): def __init__(self, **kwargs): diff --git a/tests/python/gpu/test_gluon_gpu.py b/tests/python/gpu/test_gluon_gpu.py index b47cc9e84046..777e8938f684 100644 --- a/tests/python/gpu/test_gluon_gpu.py +++ b/tests/python/gpu/test_gluon_gpu.py @@ -30,7 +30,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed, assert_raises_cudnn_not_satisfied, run_in_spawned_process +from common import assert_raises_cudnn_not_satisfied, run_in_spawned_process from test_gluon import * from test_loss import * from test_numpy_loss import * @@ -56,7 +56,6 @@ def check_rnn_layer(layer): assert_almost_equal(g, c) -@with_seed() def check_rnn_layer_w_rand_inputs(layer): layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)]) x = mx.nd.uniform(shape=(10, 16, 30)) @@ -75,7 +74,6 @@ def check_rnn_layer_w_rand_inputs(layer): assert_almost_equal(g, c) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='7.2.1') def test_lstmp(): hidden_size, projection_size = 3, 2 @@ -129,7 +127,6 @@ def test_lstmp(): lstm_layer.load_parameters('gpu_tmp.params') -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='7.2.1') def test_lstm_clip(): hidden_size, projection_size = 4096, 2048 @@ -154,7 +151,6 @@ def test_lstm_clip(): assert not np.isnan(cell_states).any() -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnn_layer(): check_rnn_layer(gluon.rnn.RNN(100, num_layers=3)) @@ -279,24 +275,20 @@ def check_layer_bidirectional_varseqlen(size, in_size): rtol=1e-2, atol=1e-6) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_layer_bidirectional(): check_layer_bidirectional(7, 5, 0) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='7.2.1') def test_layer_bidirectional_proj(): check_layer_bidirectional(7, 5, 3) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='7.2.1') def test_layer_bidirectional_varseqlength(): check_layer_bidirectional_varseqlen(7, 5) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnn_layer_begin_state_type(): fake_data = nd.random.uniform(shape=(3, 5, 7), dtype='float16') @@ -329,7 +321,6 @@ def test_gluon_ctc_consistency(): assert_almost_equal(cpu_data.grad, gpu_data.grad, atol=1e-3, rtol=1e-3) -@with_seed() def test_global_norm_clip_multi_device(): for check_isfinite in [True, False]: x1 = mx.nd.ones((3, 3), ctx=mx.gpu(0)) @@ -410,7 +401,6 @@ def _syncParameters(bn1, bn2, ctx): input2grad = mx.nd.concat(*[output.grad.as_in_context(input.context) for output in inputs2], 
dim=0) assert_almost_equal(input1.grad, input2grad, atol=1e-3, rtol=1e-3) -@with_seed() def test_sync_batchnorm(): def get_num_devices(): for i in range(100): @@ -427,7 +417,6 @@ def get_num_devices(): _check_batchnorm_result(mx.nd.random.uniform(shape=(4, 1, 4, 4)), num_devices=ndev, cuda=True) -@with_seed() def test_symbol_block_fp16(tmpdir): # Test case to verify if initializing the SymbolBlock from a model with params # other than fp32 param dtype. @@ -461,7 +450,6 @@ def test_symbol_block_fp16(tmpdir): assert np.dtype(net_fp16.params[name].dtype) == np.dtype(np.float16) -@with_seed() @pytest.mark.serial def test_large_models(): ctx = default_context() @@ -586,13 +574,11 @@ def _test_bulking(test_bulking_func): 'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \ .format(fully_bulked_time - fastest_half_bulked_time, times_str) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970') def test_bulking_gluon_gpu(): _test_bulking(_test_bulking_in_process) -@with_seed() def test_hybridblock_mix_ctx_raise(): class FooHybrid(gluon.HybridBlock): def hybrid_forward(self, F, a, b): @@ -606,7 +592,6 @@ def hybrid_forward(self, F, a, b): pytest.raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,), ctx=mx.gpu()), mx.nd.ones((10,), ctx=mx.cpu()))) -@with_seed() def test_symbol_block_symbolic_bn_fp16_cast(): with mx.gpu(0): net = mx.gluon.nn.HybridSequential() @@ -625,7 +610,6 @@ def test_symbol_block_symbolic_bn_fp16_cast(): y1 = net(x) assert np.dtype(y1.dtype).name == 'float16' -@with_seed() def test_gemms_true_fp16(): ctx = mx.gpu(0) input = mx.nd.random.uniform(shape=(1, 512), dtype='float16', ctx=ctx) @@ -647,7 +631,6 @@ def test_gemms_true_fp16(): assert_almost_equal(ref_results.asnumpy(), results_trueFP16.asnumpy(), atol=atol, rtol=rtol) -@with_seed() def test_cudnn_dropout_reproducibility(): d = nn.Dropout(0.5) d.initialize() diff --git a/tests/python/gpu/test_gluon_model_zoo_gpu.py b/tests/python/gpu/test_gluon_model_zoo_gpu.py index 53c863fd4c32..36026894ea03 100644 --- a/tests/python/gpu/test_gluon_model_zoo_gpu.py +++ b/tests/python/gpu/test_gluon_model_zoo_gpu.py @@ -28,7 +28,6 @@ import pytest curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) @@ -38,7 +37,6 @@ def download_data(): return mx.test_utils.download( 'https://repo.mxnet.io/gluon/dataset/test/val-5k-256-9e70d85e0.rec', VAL_DATA) -@with_seed() @pytest.mark.serial @pytest.mark.parametrize('model_name', ['resnet50_v1', 'vgg19_bn', 'alexnet', 'densenet201', 'squeezenet1.0', 'mobilenet0.25']) def test_inference(model_name): @@ -100,7 +98,6 @@ def get_nn_model(name): # Seed 1521019752 produced a failure on the Py2 MKLDNN-GPU CI runner # on 2/16/2018 that was not reproducible. Problem could be timing related or # based on non-deterministic algo selection. -@with_seed() @pytest.mark.serial def test_training(): # We use network models without dropout for testing. 
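The conversion pattern applied above in test_gluon_model_zoo_gpu.py recurs throughout the remaining files: a bare @with_seed() is deleted outright, because the autouse fixture already seeds every test, while a decorator carrying a fixed seed, such as @with_seed(7), becomes a pytest marker that the fixture can consult. A sketch of both sides, with a hypothetical test body for illustration (pytest.mark.seed is the marker name this patch uses; how the conftest.py fixture reads the marker is not shown in this diff):

import pytest

# Before: @with_seed()   -> simply deleted; the autouse fixture seeds every test.
# Before: @with_seed(7)  -> rewritten as the marker below.

@pytest.mark.seed(7)  # fixed seed to reduce flakiness in accuracy assertions
def test_estimator_gpu():
    ...  # hypothetical body, for illustration only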
diff --git a/tests/python/gpu/test_gluon_transforms.py b/tests/python/gpu/test_gluon_transforms.py index e4838b070d35..c005fbb48b25 100644 --- a/tests/python/gpu/test_gluon_transforms.py +++ b/tests/python/gpu/test_gluon_transforms.py @@ -27,22 +27,19 @@ from mxnet.test_utils import almost_equal, same curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import assertRaises, with_seed +from common import assertRaises from test_gluon_data_vision import test_to_tensor, test_normalize, test_crop_resize set_default_context(mx.gpu(0)) -@with_seed() def test_normalize_gpu(): test_normalize() -@with_seed() def test_to_tensor_gpu(): test_to_tensor() -@with_seed() def test_resize_gpu(): # Test with normal case 3D input float type data_in_3d = nd.random.uniform(0, 255, (300, 300, 3)) @@ -89,6 +86,5 @@ def py_bilinear_resize_nhwc(x, outputHeight, outputWidth): w1lambda*x[b][h1+h1p][w1+w1p][c]) return y -@with_seed() def test_crop_resize_gpu(): test_crop_resize() diff --git a/tests/python/gpu/test_kvstore_gpu.py b/tests/python/gpu/test_kvstore_gpu.py index 6c67531d6555..4a2530a8a3bb 100644 --- a/tests/python/gpu/test_kvstore_gpu.py +++ b/tests/python/gpu/test_kvstore_gpu.py @@ -24,7 +24,6 @@ from mxnet.test_utils import assert_almost_equal, default_context, environment curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed shape = (4, 4) keys = [5, 7, 11] @@ -43,7 +42,6 @@ def init_kv_with_str(stype='default', kv_type='local'): # 2. Test seed 1155716252 (module seed 1032824746) resulted in py3-mkldnn-gpu have error # src/operator/nn/mkldnn/mkldnn_base.cc:567: Check failed: similar # Both of them are not reproducible, so this test is back on random seeds. 
-@with_seed() @pytest.mark.skipif(mx.context.num_gpus() < 2, reason="test_rsp_push_pull needs more than 1 GPU") @pytest.mark.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189") @pytest.mark.serial diff --git a/tests/python/gpu/test_numpy_fallback.py b/tests/python/gpu/test_numpy_fallback.py index 7faab740278b..dc367b03139c 100644 --- a/tests/python/gpu/test_numpy_fallback.py +++ b/tests/python/gpu/test_numpy_fallback.py @@ -32,7 +32,7 @@ import os curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import assertRaises, with_seed +from common import assertRaises import random from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf from mxnet.numpy_op_signature import _get_builtin_op @@ -40,7 +40,6 @@ set_default_context(mx.gpu(0)) -@with_seed() @use_np @pytest.mark.serial def test_np_fallback_decorator(): diff --git a/tests/python/gpu/test_numpy_op.py b/tests/python/gpu/test_numpy_op.py index 48c305125366..07c7558443f0 100644 --- a/tests/python/gpu/test_numpy_op.py +++ b/tests/python/gpu/test_numpy_op.py @@ -24,11 +24,10 @@ import os curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import assertRaises, with_seed +from common import assertRaises set_default_context(mx.gpu(0)) -@with_seed() @use_np def test_np_einsum(): class TestEinsum(HybridBlock): diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index c9ab4c23b7b7..beb4e8192ef8 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -34,7 +34,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied +from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied from common import run_in_spawned_process from test_operator import check_sequence_reverse, allclose_function from test_operator import * @@ -88,7 +88,6 @@ def check_countsketch(in_dim,out_dim,n): check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0)) -@with_seed() @pytest.mark.serial def test_countsketch(): minindim = 40 @@ -180,7 +179,6 @@ def check_fft(shape): a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None) assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5) -@with_seed() def test_fft(): nrepeat = 2 maxdim = 10 @@ -204,7 +202,6 @@ def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2): dtype='float32', ctx=ctx) assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1) -@with_seed() @pytest.mark.serial def test_multi_sum_sq(): min_nparam = 100 @@ -262,7 +259,6 @@ def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2): ref_new_lrs[i] = lrs[i] assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2) -@with_seed() @pytest.mark.serial def test_fast_lars(): min_nparam = 50 @@ -359,7 +355,6 @@ def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol): if use_master_weights: _assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6) -@with_seed() def test_preloaded_multi_sgd(): dtypes = ['float16', 'float32'] momentums = [None, 0.9] @@ -376,7 +371,6 @@ def test_preloaded_multi_sgd(): check_preloaded_multi_sgd(dtype, shapes, momentum, 
use_master_weights) -@with_seed() @pytest.mark.serial def test_batchnorm_with_type(): ctx_list_v2_2D = [ @@ -423,7 +417,6 @@ def test_batchnorm_with_type(): check_consistency(sym, ctx_list_v2_3D) -@with_seed() @pytest.mark.serial def test_batchnorm_versions(): def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats): @@ -490,7 +483,7 @@ def test_3d_batchnorm(fix_gamma, use_global_stats): test_3d_batchnorm(True, True) -@with_seed(1234) +@pytest.mark.seed(1234) @assert_raises_cudnn_not_satisfied(min_version='5.1.10') @pytest.mark.serial def test_convolution_with_type(): @@ -535,7 +528,6 @@ def check_consistency_NxM(sym_list, ctx_list): @pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141") -@with_seed() @pytest.mark.serial def test_convolution_options(): # 1D convolution @@ -603,7 +595,6 @@ def test_convolution_options(): check_consistency_NxM([sym, sym_no_cudnn], ctx_list) -@with_seed() @pytest.mark.serial def test_conv_deconv_guards(): # Test cases for convolution and deconvolution via strided fft. Ensure that the framework @@ -654,7 +645,6 @@ def _conv_with_num_streams(seed): @pytest.mark.skip(reason="skipping for now due to severe flakiness") -@with_seed() def test_convolution_multiple_streams(): for num_streams in ['1', '2']: for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']: @@ -666,7 +656,6 @@ def test_convolution_multiple_streams(): # This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c. # Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f). -@with_seed() @pytest.mark.serial def test_convolution_large_c(): problematic_c = 64 * 1024 @@ -696,7 +685,6 @@ def test_2D_with_width(width, grad_req): # This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c. # Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f). -@with_seed() @pytest.mark.serial def test_deconvolution_large_c(): problematic_c = 64 * 1024 @@ -724,7 +712,6 @@ def test_2D_with_width(width, grad_req): test_2D_with_width(width, req) -@with_seed() @pytest.mark.serial def test_convolution_versions(): # 2D convolution NCHW @@ -749,7 +736,6 @@ def test_convolution_versions(): # More max-pooling strides and pads to test cudnn pooling implementation code paths -@with_seed() @pytest.mark.serial def test_pooling_nhwc_with_convention(): def make_pooling_syms(**kwargs): @@ -800,7 +786,6 @@ def test_pooling_with_type(): check_consistency(sym, ctx_list, rand_type=np.float16) -@with_seed() @pytest.mark.serial def test_deconvolution_with_type(): # Test basic deconvolution without exercising stride, pad or dilation. 
@@ -837,7 +822,6 @@ def test_deconvolution_with_type(): check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add") -@with_seed() @pytest.mark.serial def test_deconvolution_options(): @@ -894,7 +878,7 @@ def test_deconvolution_options(): # check_consistency_NxM([sym, sym_no_cudnn], ctx_list) -@with_seed(1234) +@pytest.mark.seed(1234) def test_bilinear_sampler_with_type(): data = mx.sym.Variable('data') grid = mx.sym.Variable('grid') @@ -913,7 +897,6 @@ def test_bilinear_sampler_with_type(): check_consistency(sym, ctx_list, grad_req="add") -@with_seed() def test_grid_generator_with_type(): data = mx.sym.Variable('data') sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20)) @@ -929,7 +912,6 @@ def test_grid_generator_with_type(): check_consistency(sym, ctx_list, grad_req="add") -@with_seed() def test_spatial_transformer_with_type(): data = mx.sym.Variable('data') loc = mx.sym.Flatten(data) @@ -947,7 +929,6 @@ def test_spatial_transformer_with_type(): check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") -@with_seed() def test_pooling_with_type2(): # While the float32 and float64 output is reliably consistent, float16 departs occasionally. # We compare cpu and gpu results only within a given precision. @@ -967,7 +948,6 @@ def test_pooling_with_type2(): sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum') check_consistency(sym, ctx_list) -@with_seed() def test_pooling_nhwc_with_type(): def make_pooling_syms(**kwargs): # Conventional NCHW layout pooling @@ -994,7 +974,6 @@ def make_pooling_syms(**kwargs): check_consistency_NxM(symlist, ctx_list) -@with_seed() @pytest.mark.serial def test_pooling_versions(): @@ -1137,7 +1116,6 @@ def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_inclu test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3) -@with_seed() def test_pooling_full_2d(): def test_pooling_full_2d_type(pool_type): data = (2, 2, 10, 10) @@ -1166,7 +1144,6 @@ def test_pooling_full_2d_type(pool_type): test_pooling_full_2d_type('sum') -@with_seed() @pytest.mark.serial def test_flatten_slice_after_conv(): ctx_list = [] @@ -1181,7 +1158,6 @@ def test_flatten_slice_after_conv(): check_consistency(slice_sym, ctx_list, scale=0.5) -@with_seed() def test_bilinear_resize_op(): ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}, {'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}] @@ -1205,7 +1181,6 @@ def test_bilinear_resize_op(): sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False) check_consistency(sym, ctx_list) -@with_seed() @pytest.mark.serial def test_global_pooling(): def test_1d_pooling(pool_type, p_value=2): @@ -1322,7 +1297,6 @@ def test_2d_pooling(pool_type, p_value=2): test_2d_pooling('lp', p_value=3) -@with_seed() def test_upsampling_with_type(): sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1) ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}}, @@ -1333,7 +1307,6 @@ def test_upsampling_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_upsampling_bilinear_with_type(): sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1) ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}}, @@ -1344,7 +1317,6 @@ def test_upsampling_bilinear_with_type(): 
check_consistency(sym, ctx_list) -@with_seed() def test_concat_with_type(): sym = mx.sym.Concat(name='concat', num_args=2) ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), @@ -1360,7 +1332,6 @@ def test_concat_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_elementwisesum_with_type(): dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]], [mx.cpu(0), [np.float64, np.float32]] ] @@ -1382,7 +1353,6 @@ def test_elementwisesum_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_reshape_with_type(): sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0)) ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}}, @@ -1393,7 +1363,6 @@ def test_reshape_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_blockgrad_with_type(): sym = mx.sym.BlockGrad(name='bg') ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}}, @@ -1404,7 +1373,6 @@ def test_blockgrad_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_swapaxis_with_type(): sym = mx.sym.SwapAxis(name='swap', dim1=1) ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}}, @@ -1415,7 +1383,6 @@ def test_swapaxis_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_fullyconnected_with_type(): sym = mx.sym.FullyConnected(num_hidden=3, name='inner') ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}}, @@ -1431,7 +1398,6 @@ def test_fullyconnected_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_activation_with_type(): act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign'] shape = (2, 2, 10, 10) @@ -1446,7 +1412,6 @@ def test_activation_with_type(): check_consistency(sym, ctx_list) -@with_seed() def test_lrn(): sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn') ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}, @@ -1454,7 +1419,6 @@ def test_lrn(): check_consistency(sym, ctx_list) -@with_seed() @pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine', reason="Testing with naive engine consistently triggers illegal memory access. 
Tracked in #17713") def test_embedding_with_type(): @@ -1485,7 +1449,6 @@ def test_embedding_helper(data_types, weight_types, low_pad, high_pad): test_embedding_helper(data_types, weight_types, 0, 5) -@with_seed() def test_take_with_type(): sym = mx.sym.take(name='take') for safe_accumulation in ['0', '1', None]: @@ -1574,7 +1537,6 @@ def test_take_with_type(): grad_req={'take_indices': 'null','take_a': 'write'}, arg_params=arg_params) -@with_seed() @pytest.mark.serial def test_psroipooling_with_type(): arg_params = { @@ -1600,7 +1562,6 @@ def test_psroipooling_with_type(): 'psroipool_rois': 'null'}, arg_params=arg_params) -@with_seed() @pytest.mark.serial def test_deformable_psroipooling_with_type(): tol = {np.dtype(np.float32): 1e-1, @@ -1658,7 +1619,6 @@ def test_deformable_psroipooling_with_type(): 'deformable_psroipool_trans': 'write'}, arg_params=arg_params) -@with_seed() @pytest.mark.serial def test_deformable_convolution_with_type(): tol = {np.dtype(np.float32): 1e-1, @@ -1693,7 +1653,6 @@ def test_deformable_convolution_with_type(): 'deformable_conv_bias': 'null'}) -@with_seed() def test_deformable_convolution_options(): tol = {np.dtype(np.float32): 1e-1, np.dtype(np.float64): 1e-3} @@ -1819,13 +1778,11 @@ def check_rnn_layer_w_rand_inputs(layer): for g, c in zip(gs, cs): assert_almost_equal(g, c, rtol=1e-2, atol=1e-6) -@with_seed() @pytest.mark.serial def test_sequence_reverse(): check_sequence_reverse(mx.gpu(0)) -@with_seed() @pytest.mark.serial def test_autograd_save_memory(): x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0)) @@ -1838,7 +1795,6 @@ def test_autograd_save_memory(): x.backward() -@with_seed() @pytest.mark.serial def test_cuda_rtc(): source = r''' @@ -1869,7 +1825,6 @@ def test_cuda_rtc(): assert (y.asnumpy() == 12).all() -@with_seed() @pytest.mark.serial def test_cross_device_autograd(): x = mx.nd.random.uniform(shape=(10,)) @@ -1897,7 +1852,6 @@ def test_cross_device_autograd(): assert_almost_equal(dx, x.grad) -@with_seed() @pytest.mark.serial def test_multi_proposal_op(): # paramters @@ -2033,7 +1987,6 @@ def test_incorrect_gpu(): # Try setting dev_id to a really big number pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001)) -@with_seed() def test_batchnorm_backwards_notrain(): for ctx in [mx.cpu(0), mx.gpu(0)]: for cudnn_o in [False, True]: @@ -2051,7 +2004,6 @@ def test_batchnorm_backwards_notrain(): loss=y.square().sum() loss.backward(train_mode=False) -@with_seed() def test_create_sparse_ndarray_gpu_to_cpu(): dim0 = 10 dim1 = 5 @@ -2069,7 +2021,6 @@ def test_create_sparse_ndarray_gpu_to_cpu(): assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy())) -@with_seed() def test_softmax_activation(): gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.], [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0)) @@ -2088,7 +2039,6 @@ def test_softmax_activation(): assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3) -@with_seed() @pytest.mark.serial @pytest.mark.serial def test_bilinear_sampler_versions(): @@ -2192,7 +2142,6 @@ def _test_bulking_in_process(seed, time_per_iteration): time_per_iteration.value = (time.time() - start) / num_iterations -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517') def test_bulking_operator_gpu(): _test_bulking(_test_bulking_in_process) @@ -2236,7 +2185,6 @@ def test_bulking(): .format(fully_bulked_time - fastest_half_bulked_time, times_str) -@with_seed() @pytest.mark.serial def test_allclose_function_gpu(): allclose_function([mx.cpu(), mx.gpu(0)]) @@ 
-2282,7 +2230,6 @@ def run_math(op, shape, dtype="float32", check_value=True): elif op == 'square': math_square(shape=shape, dtype=dtype, check_value=check_value) -@with_seed() @pytest.mark.serial def test_math(): ops = ['log', 'erf', 'square'] @@ -2294,7 +2241,6 @@ def test_math(): for op in ops: run_math(op, shape, dtype, check_value=check_value) -@with_seed() @pytest.mark.serial def test_arange_like_dtype(): dtypes = [np.float16, np.float32, np.float64] @@ -2320,7 +2266,6 @@ def test_fp16_spmm(): out_np = mx.nd.dot(inp, weight) assert_almost_equal(out.asnumpy(), out_np, rtol=1e-3, atol=1e-5) -@with_seed() @pytest.mark.serial @pytest.mark.parametrize('dtype', ["float16", "float32", "float64"]) def test_split_v2_fwd(dtype): diff --git a/tests/python/mkl/test_amp.py b/tests/python/mkl/test_amp.py index 52b62865b167..2b67a41fc73b 100644 --- a/tests/python/mkl/test_amp.py +++ b/tests/python/mkl/test_amp.py @@ -30,7 +30,6 @@ from mxnet.gluon import SymbolBlock, nn, rnn curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed bfloat16 = np.dtype([('bfloat16', np.uint16)]) @@ -94,7 +93,6 @@ def test_amp_coverage(): - If you are not sure which list to choose, FP32_FUNCS is the safest option""") -@with_seed() def test_bf16_casting(): data = mx.sym.var("data") out1 = mx.sym.amp_cast(data, dtype=bfloat16) diff --git a/tests/python/mkl/test_bf16_operator.py b/tests/python/mkl/test_bf16_operator.py index 5cd9dbc09822..14eaa1a2b287 100644 --- a/tests/python/mkl/test_bf16_operator.py +++ b/tests/python/mkl/test_bf16_operator.py @@ -30,7 +30,6 @@ from mxnet.gluon import SymbolBlock, nn, rnn curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed import pytest bfloat16 = np.dtype([('bfloat16', np.uint16)]) @@ -115,7 +114,6 @@ def check_operator_accuracy(sym_fp32, sym_bf16, data_shape, num_input_data=1, bf output_bf16_2_fp32 = mx.nd.amp_cast(output_bf16, dtype="float32") assert_almost_equal_with_err(output_bf16_2_fp32, output_fp32, rtol=rtol, atol=atol, etol=etol) -@with_seed() def test_bf16_bn(): data_sym_fp32 = mx.sym.Variable(name='data') data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) @@ -127,7 +125,6 @@ def test_bf16_bn(): check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 32, 28, 28), bf16_use_fp32_params=True, etol=1e-2) check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-2) -@with_seed() def test_bf16_conv(): data_sym_fp32 = mx.sym.Variable(name='data') data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) @@ -144,7 +141,6 @@ def test_bf16_conv(): check_operator_accuracy(sym_fp32=conv_fp32, sym_bf16=conv_bf16, data_shape=(3, 32, 28, 28), bf16_use_fp32_params=False) check_operator_accuracy(sym_fp32=conv_fp32, sym_bf16=conv_bf16, data_shape=(128, 56, 14, 14), bf16_use_fp32_params=False) -@with_seed() def test_bf16_fc(): data_sym_fp32 = mx.sym.Variable(name='data') data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) @@ -159,7 +155,6 @@ def test_bf16_fc(): fc_bf16 = mx.sym.FullyConnected(data_sym_bf16, **fc_params) check_operator_accuracy(fc_fp32, fc_bf16, data_shape=(3, 3, 16, 16), bf16_use_fp32_params=False) -@with_seed() def test_bf16_pooling(): pool_params = {"kernel": (3, 3), "stride": (1, 1), "pad": (0, 0), "name": "pool"} data_shapes = [(3, 16, 28, 28), (3, 32, 
7, 7)] @@ -174,7 +169,6 @@ def test_bf16_pooling(): pool_bf16 = mx.sym.Pooling(data_sym_bf16, **pool_params) check_operator_accuracy(pool_fp32, pool_bf16, data_shape=new_params[0], bf16_use_fp32_params=False) -@with_seed() def test_bf16_activation(): data_sym_fp32 = mx.sym.Variable(name='data') data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) @@ -187,7 +181,6 @@ def test_bf16_activation(): check_operator_accuracy(act_fp32, act_bf16, data_shape, bf16_use_fp32_params=True) -@with_seed() def test_bf16_elemwiseadd(): dshape = rand_shape_nd(4) @@ -202,7 +195,6 @@ def test_bf16_elemwiseadd(): check_operator_accuracy(sym_fp32, sym_bf16, dshape, num_input_data=2, bf16_use_fp32_params=True) @pytest.mark.skip(reason="env dependent, need check further.") -@with_seed() def test_bf16_concat(): dshape = rand_shape_nd(4) a_shape = tuple(dshape) @@ -220,7 +212,6 @@ def test_bf16_concat(): check_operator_accuracy(concat_sym_fp32, concat_sym_bf16, dshape, num_input_data=2, bf16_use_fp32_params=True) -@with_seed() def test_bf16_abs(): dshapes = [(16,), (3, 16), (3, 16, 16), (3, 16, 16, 16)] for data_shape in dshapes: @@ -231,7 +222,6 @@ def test_bf16_abs(): check_operator_accuracy(sym_fp32, sym_bf16, data_shape, bf16_use_fp32_params=True) -@with_seed() def test_bf16_sqrt(): dshapes = [(16,), (3, 16), (3, 16, 16), (3, 16, 16, 16)] for data_shape in dshapes: @@ -242,7 +232,6 @@ def test_bf16_sqrt(): check_operator_accuracy(sym_fp32, sym_bf16, data_shape, bf16_use_fp32_params=True) -@with_seed() def test_bf16_square(): dshapes = [(16,), (3, 16), (3, 16, 16), (3, 16, 16, 16)] for data_shape in dshapes: @@ -253,7 +242,6 @@ def test_bf16_square(): check_operator_accuracy(sym_fp32, sym_bf16, data_shape, bf16_use_fp32_params=True) -@with_seed() def test_bf16_flatten_slice_after_conv(): data_fp32 = mx.symbol.Variable('data') data_bf16 = mx.symbol.Variable('data', dtype=bfloat16) diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index 115baea380f2..4c11cecdc3ce 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -29,9 +29,8 @@ from mxnet.test_utils import * curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.append(os.path.join(curr_path, '../unittest/')) -from common import with_seed -@with_seed(1234) +@pytest.mark.seed(1234) def test_mkldnn_ndarray_slice(): ctx = mx.cpu() net = gluon.nn.HybridSequential() @@ -43,7 +42,7 @@ def test_mkldnn_ndarray_slice(): # trigger computation on ndarray slice assert_almost_equal(y[0].asnumpy()[0, 0, 0], np.array(0.056331709)) -@with_seed(1234) +@pytest.mark.seed(1234) def test_mkldnn_engine_threading(): net = gluon.nn.HybridSequential() net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None)) @@ -68,7 +67,6 @@ def __getitem__(self, key): assert_almost_equal(y[0, 0, 0, 0], np.array(0.056331709)) break -@with_seed() def test_mkldnn_reshape(): def test_reshape_after_conv(dst_shape): shape = (1,1,4,4) @@ -100,7 +98,6 @@ def test_reshape_after_conv(dst_shape): test_reshape_after_conv(test_case) -@with_seed() def test_reshape_before_conv(): class Net(gluon.HybridBlock): """ @@ -133,7 +130,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) -@with_seed() def test_slice_before_conv(): class Net(gluon.HybridBlock): """ @@ -166,7 +162,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) -@with_seed() def test_slice_reshape_before_conv(): class 
Net(gluon.HybridBlock): """ @@ -199,7 +194,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) -@with_seed() def test_flatten_slice_after_conv(): data = mx.symbol.Variable('data') weight = mx.symbol.Variable('weight') @@ -255,7 +249,6 @@ def test_mkldnn_sum_inplace_with_cpu_layout(): assert_almost_equal(out[0].asnumpy()[0, 0, 0], 1.0) -@with_seed() def test_batchnorm(): def check_batchnorm_training(stype): for shape in [(2, 3), (2, 3, 2, 2)]: @@ -281,7 +274,6 @@ def check_batchnorm_training(stype): for stype in stypes: check_batchnorm_training(stype) -@with_seed() def test_batchnorm_relu_fusion(): def check_batchnorm_relu_fusion(shape): x = mx.sym.Variable('x') @@ -341,7 +333,6 @@ def forward(self, x): check_batchnorm_relu_fusion_gluon((1, 3, 224, 224)) check_batchnorm_relu_fusion_gluon((8, 3, 224, 224)) -@with_seed() def test_softmax(): def check_softmax_training(stype): for shape in [(2, 3), (2, 3, 2, 2)]: @@ -358,7 +349,6 @@ def check_softmax_training(stype): check_softmax_training(stype) -@with_seed() def test_pooling(): def check_pooling_training(stype): for shape in [(3, 3, 10), (3, 3, 20, 20), (3, 3, 10, 20, 20)]: @@ -381,7 +371,6 @@ def check_pooling_training(stype): check_pooling_training(stype) -@with_seed() def test_activation(): def check_activation_training(stype): for shape in [(2, 3, 3), (2, 3, 2, 2)]: @@ -402,7 +391,6 @@ def check_activation_training(stype): check_activation_training(stype) -@with_seed() def test_convolution(): def check_convolution_training(stype): for shape in [(3, 3, 10), (3, 3, 10, 10), (3, 3, 10, 10, 10)]: @@ -430,7 +418,6 @@ def check_convolution_training(stype): check_convolution_training(stype) -@with_seed() @pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/12579") def test_Deconvolution(): def check_Deconvolution_training(stype): @@ -456,7 +443,6 @@ def check_Deconvolution_training(stype): check_Deconvolution_training(stype) -@with_seed() def test_LRN(): def check_LRN_training(stype): for shape in [(3, 4, 5, 5)]: @@ -472,7 +458,6 @@ def check_LRN_training(stype): check_LRN_training(stype) -@with_seed() def test_fullyconnected(): def check_fullyconnected_training(stype): data_shape = rand_shape_nd(2) @@ -505,7 +490,6 @@ def softmax_forward(input_data, true_output): softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0])) -@with_seed() def test_non_mkldnn_fcomputeex(): # test special case where MKLDNN formatted NDArray feeds into non-mkldnn fcomputeex operator # conv is example where MKLDNN NDArray is created from regular NDArrays @@ -549,7 +533,6 @@ def backward(self, req, out_grad, in_data, out_data, in_grad, aux): exec1 = custom._bind(mx.cpu(), args={'data': mx.nd.ones([10,3,96,96]), 'conv_weight': mx.nd.ones([8,3,5,5])}) exec1.forward()[0].wait_to_read() -@with_seed() def test_conv_transpose(): axes = [(0,2,1,3), (0,2,3,1), (1,2,3,0), (3,2,1,0)] a = np.random.rand(10, 16, 50, 50) @@ -566,7 +549,6 @@ def test_conv_transpose(): # This test case is contributed by @awsbillz in https://github.com/apache/incubator-mxnet/issues/14766 -@with_seed() def test_reshape_transpose_6d(): class Reshape2D(gluon.HybridBlock): def __init__(self, factor): @@ -601,7 +583,6 @@ def hybrid_forward(self, F, x): output = net(data) a = output.asnumpy() -@with_seed() def test_concat(): def ref_concat(a, b, axis): return np.concatenate((a, b), axis=axis) @@ -636,7 +617,6 @@ def 
check_concat_training(stype): for stype in stypes: check_concat_training(stype) -@with_seed() def test_elemwise_add(): def ref_add(a, b): return np.add(a, b) diff --git a/tests/python/mkl/test_subgraph.py b/tests/python/mkl/test_subgraph.py index ca68d6069390..4a10cef71467 100644 --- a/tests/python/mkl/test_subgraph.py +++ b/tests/python/mkl/test_subgraph.py @@ -21,7 +21,6 @@ import numpy as np import unittest import ctypes -from common import with_seed import pytest def test_float64_fallback(): diff --git a/tests/python/unittest/common.py b/tests/python/unittest/common.py index e13082c652a5..7566b4bbc7d2 100644 --- a/tests/python/unittest/common.py +++ b/tests/python/unittest/common.py @@ -160,82 +160,6 @@ def assert_raises_cuda_not_satisfied(min_version): }) -def with_seed(seed=None): - """ - A decorator for test functions that manages rng seeds. - - Parameters - ---------- - - seed : the seed to pass to np.random and mx.random - - - This tests decorator sets the np, mx and python random seeds identically - prior to each test, then outputs those seeds if the test fails or - if the test requires a fixed seed (as a reminder to make the test - more robust against random data). - - @with_seed() - def test_ok_with_random_data(): - ... - - @with_seed(1234) - def test_not_ok_with_random_data(): - ... - - Use of the @with_seed() decorator for all tests creates - tests isolation and reproducability of failures. When a - test fails, the decorator outputs the seed used. The user - can then set the environment variable MXNET_TEST_SEED to - the value reported, then rerun the test with: - - pytest --verbose --capture=no <test_module_name.py>::<test_name> - - To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment. - To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest. - """ - def test_helper(orig_test): - @functools.wraps(orig_test) - def test_new(*args, **kwargs): - test_count = int(os.getenv('MXNET_TEST_COUNT', '1')) - env_seed_str = os.getenv('MXNET_TEST_SEED') - for i in range(test_count): - if seed is not None: - this_test_seed = seed - log_level = logging.INFO - elif env_seed_str is not None: - this_test_seed = int(env_seed_str) - log_level = logging.INFO - else: - this_test_seed = np.random.randint(0, np.iinfo(np.int32).max) - log_level = logging.DEBUG - post_test_state = np.random.get_state() - np.random.seed(this_test_seed) - mx.random.seed(this_test_seed) - random.seed(this_test_seed) - logger = default_logger() - # 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump. - test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else '' - pre_test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}' - ' to reproduce.').format(test_count_msg, this_test_seed) - on_err_test_msg = ('{}Error seen with seeded test, use MXNET_TEST_SEED={}' - ' to reproduce.').format(test_count_msg, this_test_seed) - logger.log(log_level, pre_test_msg) - try: - orig_test(*args, **kwargs) - except: - # With exceptions, repeat test_msg at WARNING level to be sure it's seen.
- if log_level < logging.WARNING: - logger.warning(on_err_test_msg) - raise - finally: - # Provide test-isolation for any test having this decorator - mx.nd.waitall() - np.random.set_state(post_test_state) - return test_new - return test_helper - - def with_environment(*args_): """ Helper function that takes a dictionary of environment variables and their diff --git a/tests/python/unittest/onnx/mxnet_export_test.py b/tests/python/unittest/onnx/mxnet_export_test.py index 1f402090e3b7..baa74768e612 100644 --- a/tests/python/unittest/onnx/mxnet_export_test.py +++ b/tests/python/unittest/onnx/mxnet_export_test.py @@ -23,7 +23,6 @@ import tempfile curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '..')) -from common import with_seed from mxnet import nd, sym from mxnet.test_utils import set_default_context from mxnet.gluon import nn @@ -96,13 +95,11 @@ class TestExport(unittest.TestCase): def setUp(self): set_default_context(mx.cpu(0)) - @with_seed() def test_onnx_export_single_output(self): net = nn.HybridSequential() net.add(nn.Dense(100, activation='relu'), nn.Dense(10)) _check_onnx_export(net) - @with_seed() def test_onnx_export_multi_output(self): class MultiOutputBlock(nn.HybridBlock): def __init__(self): @@ -119,25 +116,21 @@ def hybrid_forward(self, F, x): assert len(sym.Group(net(sym.Variable('data'))).list_outputs()) == 10 _check_onnx_export(net, group_outputs=True) - @with_seed() def test_onnx_export_list_shape(self): net = nn.HybridSequential() net.add(nn.Dense(100, activation='relu'), nn.Dense(10)) _check_onnx_export(net, shape_type=list) - @with_seed() def test_onnx_export_extra_params(self): net = nn.HybridSequential() net.add(nn.Dense(100, activation='relu'), nn.Dense(10)) _check_onnx_export(net, extra_params={'extra_param': nd.array([1, 2])}) - @with_seed() def test_onnx_export_slice(self): net = nn.HybridSequential() net.add(nn.Dense(100, activation='relu'), SplitConcatBlock(), nn.Dense(10)) _check_onnx_export(net) - @with_seed() def test_onnx_export_slice_changing_shape(self): net = nn.HybridSequential() net.add(nn.Dense(100, activation='relu'), SplitConcatBlock(), diff --git a/tests/python/unittest/test_autograd.py b/tests/python/unittest/test_autograd.py index 522c9bd71d69..b9d11f5af62d 100644 --- a/tests/python/unittest/test_autograd.py +++ b/tests/python/unittest/test_autograd.py @@ -21,7 +21,7 @@ from mxnet.autograd import * from mxnet.test_utils import * -from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator from mxnet.test_utils import environment import pytest @@ -111,7 +111,6 @@ def autograd_assert(*args, **kwargs): assert same(a.asnumpy(), b.asnumpy()) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_unary_func(): def check_unary_func(x): f_exp = lambda x: nd.exp(x) @@ -129,7 +128,6 @@ def check_unary_func(x): for stype in stypes: check_unary_func(uniform.tostype(stype)) -@with_seed() def test_binary_func(): def check_binary_func(x, y): f_add = lambda x, y: x+y @@ -152,7 +150,6 @@ def check_binary_func(x, y): check_binary_func(x, y) -@with_seed() def test_operator_with_state(): def f_fc(a, b, weight, bias): x = a*b @@ -169,7 +166,6 @@ def f_fc(a, b, weight, bias): grad_vals, outputs = grad_func(a, b, weight, bias) # (TODO) assert -@with_seed() def test_argnum(): def f_with_mode(a, b, mode): if mode: @@ -187,7 +183,6 @@ def f_with_mode(a, b, mode): argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad) 
-@with_seed() def test_training(): x = nd.ones((10, 10)) with record(): @@ -198,7 +193,6 @@ def test_training(): assert (y.asnumpy() == x.asnumpy()).all() -@with_seed() def test_out_grads(): x = nd.ones((3, 5)) dx = nd.zeros_like(x) @@ -217,7 +211,6 @@ def test_out_grads(): [5,4,3,2,1]])).all() -@with_seed() def test_detach_updated_grad(): x = nd.ones((2, 2)) dx = nd.zeros_like(x) @@ -250,7 +243,6 @@ def test_detach_updated_grad(): assert x._fresh_grad == False -@with_seed() def test_retain_grad(): x = mx.nd.ones((2, 2)) dx = mx.nd.zeros((2, 2)) @@ -282,7 +274,6 @@ def test_retain_grad(): "differentiating the same graph twice without retain_graph should fail") -@with_seed() def test_attach_grad(): def check_attach_grad(x): assert x.grad is None @@ -299,7 +290,6 @@ def check_attach_grad(x): check_attach_grad(x) -@with_seed() def test_is_train(): x = mx.nd.ones((10, 10)) x.attach_grad() @@ -346,7 +336,6 @@ def test_is_train(): y = mx.nd.Dropout(x, p=0.5) assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0 -@with_seed() @pytest.mark.garbage_expected def test_function(): class func(Function): @@ -383,7 +372,6 @@ def backward(self, dm, dn): assert_almost_equal(y.grad.asnumpy(), dy1, atol=atol) -@with_seed() @pytest.mark.garbage_expected def test_function1(): class Foo(mx.autograd.Function): @@ -405,7 +393,6 @@ def backward(self, dY): X.wait_to_read() -@with_seed() @pytest.mark.garbage_expected @use_np def test_np_function(): @@ -443,7 +430,6 @@ def backward(self, dm, dn): assert_almost_equal(y.grad.asnumpy(), dy1, atol=atol) -@with_seed() @pytest.mark.garbage_expected @use_np def test_np_function1(): @@ -466,7 +452,6 @@ def backward(self, dY): X.wait_to_read() -@with_seed() @pytest.mark.garbage_expected def test_get_symbol(): x = mx.nd.ones((1,)) @@ -481,7 +466,6 @@ def test_get_symbol(): y = x*x + 2*z - 1 assert len(get_symbol(y).list_arguments()) == 2 -@with_seed() @pytest.mark.garbage_expected def test_grad_with_stype(): def check_grad_with_stype(array_stype, grad_stype, expected_stype): @@ -501,7 +485,6 @@ def check_grad_with_stype(array_stype, grad_stype, expected_stype): # check the stype of the gradient when provided check_grad_with_stype(stype, grad_stype, grad_stype) -@with_seed() @pytest.mark.garbage_expected def test_sparse_dot_grad(): def check_sparse_dot_grad(rhs): @@ -525,7 +508,6 @@ def check_sparse_dot_grad(rhs): dns.attach_grad(stype='row_sparse') check_sparse_dot_grad(dns) -@with_seed() def test_gradient(): x = mx.nd.ones((1,)) x.attach_grad() diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index 962ce6239115..7c45984ec46e 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -25,10 +25,8 @@ from mxnet.test_utils import * from mxnet.base import _as_list from mxnet.attribute import AttrScope -from common import with_seed -@with_seed() def test_while_loop_simple_forward(): class _TestBlock(gluon.HybridBlock): @@ -251,7 +249,6 @@ def _zeros_like_dict(name_list): assert_almost_equal(imp_grad, sym_grad, rtol=1e-3, atol=1e-3) -@with_seed() @pytest.mark.skip(reason="Bug in while loop op, tracked at incubator-mxnet/issues/18575") def test_while_loop_for_foreach(): @@ -793,7 +790,6 @@ def step(loop, free): ) -@with_seed() def test_while_loop_nested(): def _to_np_list(arrays): @@ -988,7 +984,6 @@ def _get_symbolic_result(out_grads): assert_almost_equal(imp_grad, sym_grad, rtol=1e-3, atol=1e-3) -@with_seed() def test_cond(): # whether there are 
free variables in three graphs # whether these three graphs contain input_vars @@ -1112,7 +1107,6 @@ def check_contrib_rnn(cell_type, num_states): rtol=1e-3, atol=1e-3) -@with_seed() def test_contrib_rnn(): cell_types = [(gluon.rnn.RNNCell, 1), (gluon.rnn.LSTMCell, 2), (gluon.rnn.GRUCell, 1)] @@ -1120,7 +1114,6 @@ def test_contrib_rnn(): check_contrib_rnn(cell_type, num_states) -@with_seed() def test_foreach(): v3 = mx.sym.var("v0") v4 = mx.sym.var("v1") @@ -1440,7 +1433,6 @@ def step17(in1, states, free): verify_foreach(step17, [v3, v4], [v5], [], arrs, states, [], out_grads, False) -@with_seed() def test_foreach_nested(): # Test nested foreach. def step_in(in1, states): @@ -1494,7 +1486,6 @@ def step_nd(in1, states): assert_almost_equal(state.grad.asnumpy(), state_grad.asnumpy(), rtol=1e-3, atol=1e-3) -@with_seed() def test_cut_subgraph_foreach(): class TestLayer(gluon.HybridBlock): def __init__(self): @@ -1529,7 +1520,6 @@ def step2(data, states): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) -@with_seed() def test_uniq_name(): class ForeachLayer1(gluon.HybridBlock): def __init__(self): @@ -1615,7 +1605,6 @@ def step2(state1, state2): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) -@with_seed() def test_cut_subgraph_while_loop(): class TestLayer(gluon.HybridBlock): def __init__(self): @@ -1649,7 +1638,6 @@ def hybrid_forward(self, F, data): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3) -@with_seed() def test_cut_subgraph_cond(): class TestLayer(gluon.HybridBlock): def __init__(self): diff --git a/tests/python/unittest/test_contrib_gluon_data_vision.py b/tests/python/unittest/test_contrib_gluon_data_vision.py index 8b88d51f5c8d..1bbf5e8dd747 100644 --- a/tests/python/unittest/test_contrib_gluon_data_vision.py +++ b/tests/python/unittest/test_contrib_gluon_data_vision.py @@ -19,7 +19,6 @@ import numpy as np import scipy.ndimage from mxnet.test_utils import * -from common import assertRaises, with_seed import shutil import tempfile import unittest @@ -63,7 +62,6 @@ def tearDown(self): print("cleanup {}".format(self.IMAGES_DIR)) shutil.rmtree(self.IMAGES_DIR) - @with_seed() def test_imageiter(self): im_list = [[np.random.randint(0, 5), x] for x in self.IMAGES] os.makedirs('./data', exist_ok=True) @@ -96,7 +94,6 @@ def test_imageiter(self): for batch in it: pass - @with_seed() def test_image_bbox_iter(self): im_list = [_generate_objects() + [x] for x in self.IMAGES] det_iter = mx.gluon.contrib.data.vision.ImageBboxDataLoader(2, (3, 300, 300), imglist=im_list, path_root='') @@ -133,7 +130,6 @@ def test_image_bbox_iter(self): ] - @with_seed() def test_bbox_augmenters(self): # only test if all augmenters will work # TODO(Joshua Zhang): verify the augmenter outputs diff --git a/tests/python/unittest/test_contrib_intgemm.py b/tests/python/unittest/test_contrib_intgemm.py index ea33f49f5138..ff1559274240 100644 --- a/tests/python/unittest/test_contrib_intgemm.py +++ b/tests/python/unittest/test_contrib_intgemm.py @@ -18,12 +18,10 @@ import mxnet as mx from mxnet import np, npx from mxnet.test_utils import same, use_np, assert_almost_equal -from common import with_seed import random import pytest @use_np -@with_seed() @pytest.mark.parametrize('shape', [(3, 2), (9,17), (2, 7, 1, 8)] + [(i,) for i in range(1,65)]) def test_contrib_intgemm_maxabsolute(shape): @@ -41,7 +39,6 @@ def test_contrib_intgemm_maxabsolute(shape): assert same(fast, slow) @use_np -@with_seed() @pytest.mark.parametrize('shape', [(i,) for i in 
range(1, 67)] + [(2,3), (130, 12)]) @pytest.mark.parametrize('max_quant', [2.0]) def test_contrib_intgemm_prepare_data(shape, max_quant): @@ -70,7 +67,6 @@ def test_contrib_intgemm_prepare_data(shape, max_quant): assert same(test, ref.as_np_ndarray()) @use_np -@with_seed() @pytest.mark.parametrize('shape', [(8, 64), (16, 64), (8, 128), (16, 128), (2, 4, 64)]) @pytest.mark.parametrize('max_quant', [0.2, 3.0]) @pytest.mark.parametrize('api', [(mx.nd.contrib, mx.nd), (npx, np)]) @@ -92,7 +88,6 @@ def test_contrib_intgemm_weight_consistent(shape, max_quant, api): assert same(direct, indirect) @use_np -@with_seed() @pytest.mark.parametrize('indices', [ [0,1,2,3,4,5,6,7], [1,2,1,2,1,2,1,2], diff --git a/tests/python/unittest/test_contrib_operator.py b/tests/python/unittest/test_contrib_operator.py index e3f59de8a633..c0c14b7add8f 100644 --- a/tests/python/unittest/test_contrib_operator.py +++ b/tests/python/unittest/test_contrib_operator.py @@ -23,8 +23,7 @@ import itertools from numpy.testing import assert_allclose, assert_array_equal from mxnet.test_utils import * -from common import with_seed, assert_raises_cudnn_not_satisfied, \ - xfail_when_nonstandard_decimal_separator +from common import assert_raises_cudnn_not_satisfied, xfail_when_nonstandard_decimal_separator import unittest def test_box_nms_op(): @@ -354,7 +353,6 @@ def test_box_decode_op(): assert_allclose(Y.asnumpy(), np.array([[[-0.0562755, -0.00865743, 0.26227552, 0.42465743], \ [0.13240421, 0.17859563, 0.93759584, 1.1174043 ]]]), atol=1e-5, rtol=1e-5) -@with_seed() def test_op_mrcnn_mask_target(): if default_context().device_type != 'gpu': return @@ -411,7 +409,6 @@ def test_op_mrcnn_mask_target(): assert_almost_equal(mask_targets.asnumpy(), gt_mask_targets.asnumpy()) assert_almost_equal(mask_cls.asnumpy(), gt_mask_cls.asnumpy()) -@with_seed() def test_dynamic_reshape(): def dynamic_reshape_testcases(src_shape, shape_arg, dst_shape): data = mx.sym.Variable('data') diff --git a/tests/python/unittest/test_contrib_optimizer.py b/tests/python/unittest/test_contrib_optimizer.py index cb90f429c864..f0fbb7b7aaec 100644 --- a/tests/python/unittest/test_contrib_optimizer.py +++ b/tests/python/unittest/test_contrib_optimizer.py @@ -23,7 +23,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator @xfail_when_nonstandard_decimal_separator @@ -63,7 +63,6 @@ def test_group_adagrad(): @xfail_when_nonstandard_decimal_separator -@with_seed() @pytest.mark.serial def test_adamw(): def get_refs(m, v, weight, grad_rescale, beta1, beta2, lr, eta, wd, epsilon, clip_grad=-1): diff --git a/tests/python/unittest/test_contrib_stes_op.py b/tests/python/unittest/test_contrib_stes_op.py index 26ab6f9491e4..5d20fd15f98c 100644 --- a/tests/python/unittest/test_contrib_stes_op.py +++ b/tests/python/unittest/test_contrib_stes_op.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
-from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator import mxnet as mx from mxnet import nd, autograd, gluon from mxnet.test_utils import default_context @@ -97,7 +97,6 @@ def check_ste(net_type_str, w_init, hybridize, in_data, ctx=None): str(net.expected_grads(in_data, w_init)) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_contrib_round_ste(): # Test with random data in_data = nd.uniform(-10, 10, shape=30) # 10 and 30 are arbitrary numbers @@ -119,7 +118,6 @@ def test_contrib_round_ste(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_contrib_sign_ste(): in_data = nd.uniform(-10, 10, shape=30) # 10 and 30 are arbitrary numbers w_init = float(nd.uniform(-10, 10, shape=1).asscalar()) diff --git a/tests/python/unittest/test_dynamic_shape.py b/tests/python/unittest/test_dynamic_shape.py index b61fbeef6b74..408c3b8ddfe2 100644 --- a/tests/python/unittest/test_dynamic_shape.py +++ b/tests/python/unittest/test_dynamic_shape.py @@ -22,7 +22,6 @@ from mxnet.test_utils import * from mxnet.base import _as_list from mxnet.attribute import AttrScope -from common import with_seed def test_dynamic_shape(): diff --git a/tests/python/unittest/test_exc_handling.py b/tests/python/unittest/test_exc_handling.py index e9b60d20f194..938a86957c71 100644 --- a/tests/python/unittest/test_exc_handling.py +++ b/tests/python/unittest/test_exc_handling.py @@ -19,14 +19,12 @@ import mxnet as mx import numpy as np from mxnet import gluon -from common import with_seed from mxnet.gluon import nn from mxnet.base import MXNetError from mxnet.test_utils import assert_exception, default_context, set_default_context, use_np import pytest -@with_seed() @pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine', reason="This test assumes asynchronous execution.") def test_exc_imperative(): @@ -40,7 +38,6 @@ def imperative(exec_numpy=True): imperative(exec_numpy=False) pytest.raises(MXNetError, imperative, exec_numpy=True) -@with_seed() def test_exc_symbolic(): def symbolic(exec_backward=True, waitall=True): x = mx.sym.Variable('x') @@ -78,7 +75,6 @@ def symbolic(exec_backward=True, waitall=True): pytest.raises(MXNetError, symbolic, exec_backward=False, waitall=True) pytest.raises(MXNetError, symbolic, exec_backward=True, waitall=True) -@with_seed() @pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine', reason="This test assumes asynchronous execution.") def test_exc_gluon(): @@ -102,7 +98,6 @@ def gluon(exec_wait=True, waitall=False): pytest.raises(MXNetError, gluon, waitall=True) -@with_seed() def test_exc_multiple_waits(): def multiple_waits(waitall=False): # Test calling failed op followed by wait_to_read or waitall twice @@ -131,7 +126,6 @@ def multiple_waits(waitall=False): multiple_waits(waitall=False) multiple_waits(waitall=True) -@with_seed() @pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine', reason="This test assumes asynchronous execution.") def test_exc_post_fail(): @@ -150,7 +144,6 @@ def post_fail(waitall=False): post_fail(waitall=False) post_fail(waitall=True) -@with_seed() def test_exc_mutable_var_fail(): def mutable_var_check(waitall=False): a, b = mx.nd.random_normal(0, -1, (2, 2)).copyto(default_context()) @@ -162,7 +155,6 @@ def mutable_var_check(waitall=False): pytest.raises(MXNetError, mutable_var_check, waitall=False) pytest.raises(MXNetError, mutable_var_check, waitall=True) -@with_seed() def test_multiple_waitalls(): caught = False 
try: @@ -173,7 +165,6 @@ def test_multiple_waitalls(): assert caught, "No exception thrown" mx.nd.waitall() -@with_seed() def run_training_iteration(data): output = net(data) @@ -189,7 +180,6 @@ def run_training_iteration(data): mx.profiler.set_state("stop") -@with_seed() def test_opencv_exception(): def check_resize(): img = mx.nd.ones((1200, 1600, 3)) @@ -198,7 +188,6 @@ def check_resize(): pytest.raises(MXNetError, check_resize) -@with_seed() def test_np_reshape_exception(): a = mx.np.ones((10, 10)) a.reshape((-1,)).asnumpy() # Check no-raise @@ -207,7 +196,6 @@ def test_np_reshape_exception(): pytest.raises(MXNetError, lambda: mx.np.reshape(a, (-1, 3))) -@with_seed() @use_np def test_np_random_incorrect_named_arguments(): random_ops = ['uniform', 'normal', 'randint', 'choice'] diff --git a/tests/python/unittest/test_executor.py b/tests/python/unittest/test_executor.py index 20dcaa0003d5..fe30b3bb9dad 100644 --- a/tests/python/unittest/test_executor.py +++ b/tests/python/unittest/test_executor.py @@ -17,7 +17,6 @@ import numpy as np import mxnet as mx -from common import with_seed from mxnet.test_utils import assert_almost_equal, environment @@ -72,7 +71,6 @@ def check_bind_with_uniform(uf, gf, dim, sf=None, lshape=None, rshape=None): assert_almost_equal(rhs_grad.asnumpy(), rhs_grad2, rtol=1e-5, atol=1e-5) -@with_seed() def test_bind(): for enable_bulking in ['0', '1']: with environment({'MXNET_EXEC_BULK_EXEC_INFERENCE': enable_bulking, @@ -106,7 +104,6 @@ def test_bind(): # @roywei: Removing fixed seed as flakiness in this test is fixed # tracked at https://github.com/apache/incubator-mxnet/issues/11686 -@with_seed() def test_dot(): nrepeat = 10 maxdim = 4 @@ -128,7 +125,6 @@ def test_dot(): sf = mx.symbol.dot) -@with_seed() def test_reshape(): x = mx.sym.Variable('x') y = mx.sym.FullyConnected(x, num_hidden=4) @@ -152,7 +148,6 @@ def test_reshape(): # weight ndarray is shared between exe and new_exe assert np.all(exe.arg_arrays[1].asnumpy() == 1) -@with_seed() def test_cached_op_init(): def check_init(static_alloc, static_shape): out = mx.sym.zeros((3,3)) diff --git a/tests/python/unittest/test_gluon.py b/tests/python/unittest/test_gluon.py index 3e3b9edc6e05..5f27521e0ffa 100644 --- a/tests/python/unittest/test_gluon.py +++ b/tests/python/unittest/test_gluon.py @@ -27,8 +27,8 @@ from mxnet.ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID from mxnet.test_utils import use_np import mxnet.numpy as _mx_np -from common import (with_seed, assertRaises, - assert_raises_cudnn_not_satisfied, xfail_when_nonstandard_decimal_separator, environment) +from common import assertRaises, assert_raises_cudnn_not_satisfied, \ + xfail_when_nonstandard_decimal_separator, environment import numpy as np from numpy.testing import assert_array_equal import pytest @@ -38,7 +38,6 @@ import random import tempfile -@with_seed() def test_parameter(): p = gluon.Parameter('weight', shape=(10, 10)) p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)]) @@ -52,17 +51,14 @@ def test_parameter(): p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)]) assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)] -@with_seed() def test_invalid_parameter_stype(): with pytest.raises(AssertionError): p = gluon.Parameter('weight', shape=(10, 10), stype='invalid') -@with_seed() def test_invalid_parameter_grad_stype(): with pytest.raises(AssertionError): p = gluon.Parameter('weight', shape=(10, 10), grad_stype='invalid') -@with_seed() def test_sparse_parameter(): p = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse') 
p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)]) @@ -82,7 +78,6 @@ def test_sparse_parameter(): p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)]) assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)] -@with_seed() def test_parameter_invalid_access(): # cannot call data on row_sparse parameters p0 = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse') @@ -97,7 +92,6 @@ def test_parameter_invalid_access(): assertRaises(RuntimeError, p1.list_row_sparse_data, row_id) -@with_seed() def test_parameter_row_sparse_data(): ctx0 = mx.cpu(1) ctx1 = mx.cpu(2) @@ -123,7 +117,6 @@ def test_parameter_row_sparse_data(): mx.test_utils.assert_almost_equal(retained_2[0].asnumpy(), retained_target_2.asnumpy()) -@with_seed() def test_constant(): class Test(gluon.HybridBlock): def __init__(self, **kwargs): @@ -151,7 +144,6 @@ def hybrid_forward(self, F, x, const): assert (x.grad.asnumpy() == 1).all() -@with_seed() def test_parameter_sharing(): class Net(gluon.Block): def __init__(self, in_units=0, **kwargs): @@ -183,7 +175,6 @@ def forward(self, x): net6.load_parameters('net4.params', mx.cpu()) -@with_seed() def test_parameter_str(): class Net(gluon.Block): def __init__(self, **kwargs): @@ -198,7 +189,6 @@ def __init__(self, **kwargs): assert 'float32' in lines[0] -@with_seed() def test_collect_parameters(): net = nn.HybridSequential() net.add(nn.Conv2D(10, 3)) @@ -210,7 +200,6 @@ def test_collect_parameters(): assert set(net.collect_params('0.bias|1.bias').keys()) == \ set(['0.bias', '1.bias']) -@with_seed() def test_basic(): model = nn.Sequential() model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False)) @@ -235,7 +224,6 @@ def test_basic(): assert list(model.collect_params().values())[0]._grad is not None -@with_seed() def test_dense(): model = nn.Dense(128, activation='tanh', in_units=10, flatten=False) inputs = mx.sym.Variable('data') @@ -258,7 +246,6 @@ def test_hybrid_sequential_unique_internals(): assert len(set(s.name for s in net(mx.sym.Variable('data')).get_internals())) == 8 -@with_seed() @pytest.mark.parametrize('compute_before_cast', [True, False]) def test_symbol_block(tmpdir, compute_before_cast): model = nn.HybridSequential() @@ -350,7 +337,6 @@ def hybrid_forward(self, F, x): prediction = net_fp64.forward(fp32_data) assert np.dtype(prediction.dtype) == np.dtype(np.float32) -@with_seed() def test_sparse_symbol_block(): data = mx.sym.var('data') weight = mx.sym.var('weight', stype='row_sparse') @@ -360,7 +346,6 @@ def test_sparse_symbol_block(): # an exception is expected when creating a SparseBlock w/ sparse param net = gluon.SymbolBlock(out, data) -@with_seed() def test_sparse_hybrid_block(): params = {} params['weight'] = gluon.Parameter('weight', shape=(5,5), stype='row_sparse', dtype='float32') @@ -372,7 +357,6 @@ def test_sparse_hybrid_block(): # an exception is expected when forwarding a HybridBlock w/ sparse param y = net(x) -@with_seed() def test_hybrid_block_none_args(): class Foo(gluon.HybridBlock): def hybrid_forward(self, F, a, b): @@ -459,7 +443,6 @@ def hybrid_forward(self, F, a, b): pytest.raises(ValueError, lambda: foo1(mx.nd.ones((10,)), mx.nd.ones((10,)))) -@with_seed() def test_hybrid_block_hybrid_no_hybrid(): class FooHybrid(gluon.HybridBlock): def hybrid_forward(self, F, a, b): @@ -504,7 +487,6 @@ def forward(self, a, b): mx.nd.ones((10,), ctx=mx.cpu(2)))) -@with_seed() def check_layer_forward(layer, dshape): print("checking layer {}\nshape: {}.".format(layer, dshape)) layer.initialize() @@ -528,7 +510,6 @@ def 
check_layer_forward(layer, dshape): mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6) mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6) -@with_seed() @pytest.mark.parametrize('layer,shape', [ (nn.Conv1D(16, 3, in_channels=4), (1, 4, 10)), (nn.Conv1D(16, 3, groups=2, in_channels=4), (1, 4, 10)), @@ -548,7 +529,6 @@ def check_layer_forward(layer, dshape): def test_conv(layer, shape): check_layer_forward(layer, shape) -@with_seed() @pytest.mark.parametrize('layer,shape', [ (nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4), (1, 10, 10, 4)), # (nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4), (1, 10, 10, 10, 4)), @@ -560,7 +540,6 @@ def test_conv_nhwc(layer, shape): check_layer_forward(layer, shape) -@with_seed() def test_deconv(): # layers1d = [ # nn.Conv1DTranspose(16, 3, in_channels=4), @@ -602,7 +581,6 @@ def test_deconv(): # # check_layer_forward(layer, (1, 10, 10, 10, 4)) -@with_seed() def test_pool(): # transpose shape to bring feature dimension 'c' from 2nd position to last def transpose(shape): @@ -675,7 +653,6 @@ def transpose(shape): assert (layer(x).shape==ceil_out_shape) -@with_seed() @pytest.mark.parametrize('variable', ['running_var', 'running_mean']) def test_batchnorm_backward_synchronization(variable): """ @@ -701,14 +678,12 @@ def test_batchnorm_backward_synchronization(variable): raise AssertionError("Two consecutive reads of " + variable + " give different results") -@with_seed() def test_batchnorm(): layer = nn.BatchNorm(in_channels=10) check_layer_forward(layer, (2, 10, 10, 10)) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sync_batchnorm(): def _check_batchnorm_result(input, num_devices=1, cuda=False): from mxnet.gluon.utils import split_and_load @@ -830,12 +805,10 @@ def _syncParameters(bn1, bn2, ctx): num_devices=ndev, cuda=cuda) -@with_seed() def test_instancenorm(): layer = nn.InstanceNorm(in_channels=10) check_layer_forward(layer, (2, 10, 10, 10)) -@with_seed() def test_layernorm(): layer = nn.LayerNorm(in_channels=10) check_layer_forward(layer, (2, 10, 10, 10)) @@ -847,7 +820,6 @@ def test_layernorm(): layer.hybridize() pytest.raises(MXNetError, lambda: layer(mx.nd.ones((2, 11)))) -@with_seed() def test_groupnorm(): layer = nn.GroupNorm() check_layer_forward(layer, (2, 10, 10, 10)) @@ -856,13 +828,11 @@ def test_groupnorm(): layer = nn.GroupNorm(num_groups=5) check_layer_forward(layer, (2, 10, 10, 10)) -@with_seed() def test_reflectionpad(): layer = nn.ReflectionPad2D(3) check_layer_forward(layer, (2, 3, 24, 24)) -@with_seed() def test_reshape(): x = mx.nd.ones((2, 4, 10, 10)) layer = nn.Conv2D(10, 2, in_channels=4) @@ -874,7 +844,6 @@ def test_reshape(): x.backward() -@with_seed() def test_slice(): x = mx.nd.ones((5, 4, 10, 10)) layer = nn.Conv2D(10, 2, in_channels=4) @@ -886,7 +855,6 @@ def test_slice(): x.backward() -@with_seed() def test_at(): x = mx.nd.ones((5, 4, 10, 10)) layer = nn.Conv2D(10, 2, in_channels=4) @@ -898,7 +866,6 @@ def test_at(): x.backward() -@with_seed() def test_deferred_init(): x = mx.nd.ones((5, 4, 10, 10)) layer = nn.Conv2D(10, 2) @@ -922,7 +889,6 @@ def check_split_data(x, num_slice, batch_axis, **kwargs): assert all(r1.reshape(-1) == r2.reshape(-1)) -@with_seed() @use_np def test_split_data_np(): x = _mx_np.random.uniform(size=(128, 33, 64)) @@ -936,7 +902,6 @@ def test_split_data_np(): return assert False, "Should have failed" -@with_seed() def test_split_data(): x = mx.nd.random.uniform(shape=(128, 33, 64)) check_split_data(x, 8, 0) @@ -949,7 
+914,6 @@ def test_split_data(): return assert False, "Should have failed" -@with_seed() def test_flatten(): flatten = nn.Flatten() x = mx.nd.zeros((3,4,5,6)) @@ -959,7 +923,6 @@ def test_flatten(): x = mx.nd.zeros((3,)) assert flatten(x).shape == (3, 1) -@with_seed() def test_block_attr_hidden(): b = gluon.Block() @@ -968,7 +931,6 @@ def test_block_attr_hidden(): b.a = 1 -@with_seed() def test_block_attr_block(): b = gluon.Block() @@ -978,7 +940,6 @@ def test_block_attr_block(): b.b = (2,) -@with_seed() def test_block_attr_param(): b = gluon.Block() @@ -988,7 +949,6 @@ def test_block_attr_param(): b.b = (2,) -@with_seed() def test_block_attr_regular(): b = gluon.Block() @@ -999,7 +959,6 @@ def test_block_attr_regular(): assert b.c is c2 and list(b._children.values())[0]() is c2 -@with_seed() def test_block_attr_list_of_block(): class Model1(gluon.Block): def __init__(self, **kwargs): @@ -1058,12 +1017,10 @@ def check_sequential(net): assert len(slc) == 2 and slc[0] is dense2 and slc[1] is dense3 assert isinstance(slc, type(net)) -@with_seed() def test_sequential(): check_sequential(gluon.nn.Sequential()) check_sequential(gluon.nn.HybridSequential()) -@with_seed() def test_sequential_warning(): with warnings.catch_warnings(record=True) as w: # The following line permits the test to pass if run multiple times @@ -1074,7 +1031,6 @@ def test_sequential_warning(): assert len(w) == 1 -@with_seed() def test_global_norm_clip(): stypes = ['default', 'row_sparse'] def check_global_norm_clip(stype, check_isfinite): @@ -1095,7 +1051,6 @@ def check_global_norm_clip(stype, check_isfinite): for check_isfinite in [True, False]: check_global_norm_clip(stype, check_isfinite) -@with_seed() def test_embedding(): def check_embedding(sparse_grad): layer = gluon.nn.Embedding(10, 100, sparse_grad=sparse_grad) @@ -1123,7 +1078,6 @@ def check_embedding_large_input(sparse_grad): check_embedding_large_input(True) check_embedding_large_input(False) -@with_seed() def test_export(tmpdir): tmpfile = os.path.join(str(tmpdir), 'gluon') ctx = mx.context.current_context() @@ -1138,7 +1092,6 @@ def test_export(tmpdir): assert symbol_filename == tmpfile+'-symbol.json' assert params_filename == tmpfile+'-0000.params' -@with_seed() def test_import(): ctx = mx.context.current_context() net1 = gluon.model_zoo.vision.resnet18_v1( @@ -1161,7 +1114,6 @@ def test_import(): assert lines[2] == ')' -@with_seed() def test_hybrid_stale_cache(): net = mx.gluon.nn.HybridSequential() net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False)) @@ -1188,7 +1140,6 @@ def test_hybrid_stale_cache(): assert net(mx.nd.ones((2,3,5))).shape == (2, 10) -@with_seed() def test_lambda(): net1 = mx.gluon.nn.HybridSequential() net1.add(nn.Activation('tanh'), @@ -1210,7 +1161,6 @@ def test_lambda(): assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3) -@with_seed() def test_fill_shape_deferred(): net = nn.HybridSequential() net.add(nn.Conv2D(64, kernel_size=2, padding=1), @@ -1225,7 +1175,6 @@ def test_fill_shape_deferred(): assert net[2].weight.shape[1] == 3072, net[2].weight.shape[1] -@with_seed() def test_dtype(): net = mx.gluon.model_zoo.vision.resnet18_v1() net.initialize() @@ -1262,7 +1211,6 @@ def forward(self, x): out = net(mx.nd.ones((3,), dtype=np.float64)) mx.nd.waitall() -@with_seed() def test_fill_shape_load(): ctx = mx.context.current_context() net1 = nn.HybridSequential() @@ -1287,7 +1235,6 @@ def test_fill_shape_load(): assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1] 
-@with_seed() def test_inline(): net = mx.gluon.nn.HybridSequential() net.add(mx.gluon.nn.Dense(10)) @@ -1313,7 +1260,6 @@ def test_inline(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_activations(): point_to_validate = mx.nd.array([-0.1, 0.1] * 3) @@ -1370,7 +1316,6 @@ def selu(x): # assert test_point == ref_point -@with_seed() def test_dropout(): def get_slice(x, axis, idx): ix = () @@ -1409,7 +1354,6 @@ def check_dropout_axes(ratio, shape, axes): check_dropout_axes(0.25, nshape, axes = (0, 2, 3)) check_dropout_axes(0.25, nshape, axes = (1, 2, 3)) -@with_seed() def test_req(): data = mx.nd.random.uniform(shape=(1,3,224,224)) label = mx.nd.random.uniform(shape=(1)) @@ -1446,7 +1390,6 @@ def test_req(): assert_almost_equal(grad * 2, grad_double) -@with_seed() def test_save_load(tmpdir): net = mx.gluon.model_zoo.vision.get_resnet(1, 18, pretrained=False, root=str(tmpdir)) net.initialize() @@ -1481,7 +1424,6 @@ def forward(self, x): net2 = Network() net2.load_parameters(param_path) -@with_seed() def test_save_load_deduplicate_with_shared_params(tmpdir): class B(mx.gluon.Block): def __init__(self): @@ -1520,7 +1462,6 @@ def __init__(self, b1, b2): c = C(b1, b2) c.load_parameters(param_path) -@with_seed() def test_symbol_block_save_load(tmpdir): tmp = str(tmpdir) tmpfile = os.path.join(tmp, 'resnet34_fp64') @@ -1553,14 +1494,12 @@ def hybrid_forward(self, F, x): net2.load_parameters(params_file) -@with_seed() def test_hybrid_multi_context(): net = mx.gluon.model_zoo.vision.get_resnet(1, 18) net.initialize(ctx=[mx.cpu(0), mx.cpu(1)]) net.hybridize() net(mx.nd.zeros((1, 3, 32, 32), ctx=mx.cpu(0))).asnumpy() -@with_seed() def test_zero_grad(): def _test_grad_reset(ctx, dtype='float32', sparse=False, embeddingType=None): data = mx.nd.random.uniform(shape=(3,3), dtype=dtype, ctx=ctx) @@ -1610,7 +1549,6 @@ def _test_multi_reset(nArrays, dtype, ctx): _test_grad_reset(ctx, dtype=type, sparse=sparse, embeddingType=embType) -@with_seed() @pytest.mark.parametrize('static_alloc', [False, True]) @pytest.mark.parametrize('static_shape', [False, True]) def test_hybrid_static_memory(static_alloc, static_shape): @@ -1642,7 +1580,6 @@ def test(net, x): assert_almost_equal(grads1[key].asnumpy(), grads2[key].asnumpy(), rtol=1e-3, atol=1e-4) -@with_seed() @pytest.mark.parametrize('static_alloc', [False, True]) @pytest.mark.parametrize('static_shape', [False, True]) def test_hybrid_static_memory_switching(static_alloc, static_shape): @@ -1665,7 +1602,6 @@ def test_hybrid_static_memory_switching(static_alloc, static_shape): y.backward() mx.nd.waitall() -@with_seed() def test_hook(): global hook_call_count hook_call_count = 0 @@ -1700,7 +1636,6 @@ def call_pre_hook(block, x): assert hook_call_count == 1 assert pre_hook_call_count == 2 -@with_seed() def test_op_hook_output_names(): def check_name(block, expected_names, inputs=None, expected_opr_names=None, monitor_all=False): opr_names = [] @@ -1765,7 +1700,6 @@ def mon_callback(node_name, opr_name, arr): 'hybridsequential_dense0_fwd_bias', 'hybridsequential_dense0_fwd_output', 'hybridsequential_activation0_fwd_input0', 'hybridsequential_activation0_fwd_output'], monitor_all=True) -@with_seed() def test_apply(): global called_blocks called_blocks = [] @@ -1782,7 +1716,6 @@ def record_name(block): assert called_blocks == [type(block[0]), type(block[1]), type(block)] -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_summary(): net = gluon.model_zoo.vision.resnet50_v1() @@ -1804,7 +1737,6 @@ def test_summary(): 
net.hybridize() pytest.raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224))) -@with_seed() def test_sparse_hybrid_block_grad(): class Embedding(mx.gluon.HybridBlock): def __init__(self, num_tokens, embedding_size): @@ -1831,7 +1763,6 @@ def hybrid_forward(self, F, words): assert (grad[:10] == 2).all() assert (grad[10:] == 0).all() -@with_seed() def test_sparse_hybrid_block(): class Linear(mx.gluon.HybridBlock): def __init__(self, units): @@ -1950,7 +1881,6 @@ def check_layer_forward_withinput(net, x): mx.test_utils.assert_almost_equal(x.grad.asnumpy(), x_hybrid.grad.asnumpy(), rtol=1e-5, atol=1e-6) mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6) -@with_seed() @pytest.mark.parametrize('chn_num', [16, 256]) @pytest.mark.parametrize('kernel', [1, 3, 224]) def test_conv2d_16c(chn_num, kernel): @@ -1971,7 +1901,6 @@ def hybrid_forward(self, F, x): net = Net(chn_num, kernel) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.parametrize('grp', [16]) @pytest.mark.parametrize('kernel_size', [1, 3]) def test_group_conv2d_16c(grp, kernel_size): @@ -1996,7 +1925,6 @@ def hybrid_forward(self, F, x): net = Net(grp, kernel_size) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_deconv2d_16c(): in_chn_list = [1024, 512, 256, 128, 64, 32, 16] @@ -2019,7 +1947,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_batchnorm_16c(): chn_list = [16, 1024] @@ -2051,7 +1978,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() def test_concat(): chn_list = [16, 64] shapes = [1, 3, 5] @@ -2083,7 +2009,6 @@ def hybrid_forward(self, F, x): net = Net(axis, input_num, chn_list[i], 1) check_layer_forward_withinput(net, x) -@with_seed() def test_reshape_conv(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2099,7 +2024,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_conv_reshape_conv(): class Net(gluon.HybridBlock): @@ -2119,7 +2043,6 @@ def hybrid_forward(self, F, x): net = Net() check_layer_forward_withinput(net, x) -@with_seed() def test_slice_conv(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2135,7 +2058,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() def test_slice_conv_slice_conv(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2155,7 +2077,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_slice_conv_reshape_conv(): class Net(gluon.HybridBlock): @@ -2176,7 +2097,6 @@ def hybrid_forward(self, F, x): net = Net() check_layer_forward_withinput(net, x) -@with_seed() def test_reshape_conv_slice_conv(): """ This test will test gluon Conv2d computation with ndarray reshape and slice @@ -2198,7 +2118,6 @@ def hybrid_forward(self, F, x): net = Net() check_layer_forward_withinput(net, x) -@with_seed() def test_reshape_dense(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2216,7 
+2135,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() def test_slice_dense(): class Net(gluon.HybridBlock): def __init__(self, slice, **kwargs): @@ -2236,7 +2154,6 @@ def hybrid_forward(self, F, x): net = Net(slice) check_layer_forward_withinput(net, x) -@with_seed() def test_slice_dense_slice_dense(): class Net(gluon.HybridBlock): def __init__(self, slice, **kwargs): @@ -2259,7 +2176,6 @@ def hybrid_forward(self, F, x): net = Net(slice) check_layer_forward_withinput(net, x) -@with_seed() def test_reshape_dense_reshape_dense(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2281,7 +2197,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() def test_slice_dense_reshape_dense(): class Net(gluon.HybridBlock): def __init__(self, slice, **kwargs): @@ -2305,7 +2220,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() def test_reshape_dense_slice_dense(): class Net(gluon.HybridBlock): def __init__(self, **kwargs): @@ -2327,7 +2241,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_batchnorm(): class Net(gluon.HybridBlock): @@ -2349,7 +2262,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_batchnorm(): class Net(gluon.HybridBlock): @@ -2372,7 +2284,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_slice_batchnorm_slice_batchnorm(): @@ -2398,7 +2309,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_batchnorm_reshape_batchnorm(): class Net(gluon.HybridBlock): @@ -2423,7 +2333,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_batchnorm_reshape_batchnorm(): class Net(gluon.HybridBlock): @@ -2450,7 +2359,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_batchnorm_slice_batchnorm(): class Net(gluon.HybridBlock): @@ -2476,7 +2384,6 @@ def hybrid_forward(self, F, x): net = Net(shape, slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_pooling2d(): max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1)) @@ -2504,7 +2411,6 @@ def hybrid_forward(self, F, x): net = Net(shape, pooling_layers[i]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_pooling2d(): # transpose shape to bring feature dimension 'c' from 2nd position to last @@ -2542,7 +2448,6 @@ def hybrid_forward(self, F, x): net = Net(slice, pooling_layers[i]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_reshape_pooling2d_reshape_pooling2d(): max_pooling = 
nn.MaxPool2D(strides=(2, 2), padding=(1, 1)) @@ -2577,7 +2482,6 @@ def hybrid_forward(self, F, x): net = Net(shape, pooling_layers[i], pooling_layers[j]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_pooling2d_slice_pooling2d(): max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1)) @@ -2612,7 +2516,6 @@ def hybrid_forward(self, F, x): net = Net(slice, pooling_layers[i], pooling_layers[j]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') def test_slice_pooling2d_reshape_pooling2d(): max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1)) @@ -2648,7 +2551,6 @@ def hybrid_forward(self, F, x): net = Net(shape, slice, pooling_layers[i], pooling_layers[j]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_reshape_pooling2d_slice_pooling2d(): @@ -2687,7 +2589,6 @@ def hybrid_forward(self, F, x): net = Net(shape, slice, pooling_layers[i], pooling_layers[j]) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_reshape_deconv(): @@ -2706,7 +2607,6 @@ def hybrid_forward(self, F, x): net = Net(shape) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_slice_deconv(): @@ -2725,7 +2625,6 @@ def hybrid_forward(self, F, x): net = Net(slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_reshape_deconv_reshape_deconv(): @@ -2748,7 +2647,6 @@ def hybrid_forward(self, F, x): net = Net(shape) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_slice_deconv_slice_deconv(): @@ -2771,7 +2669,6 @@ def hybrid_forward(self, F, x): net = Net(slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_reshape_deconv_slice_deconv(): @@ -2796,7 +2693,6 @@ def hybrid_forward(self, F, x): net = Net(shape, slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164') @pytest.mark.serial def test_slice_deconv_reshape_deconv(): @@ -2821,7 +2717,6 @@ def hybrid_forward(self, F, x): net = Net(shape, slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_reshape_activation(): class Net(gluon.HybridBlock): @@ -2842,7 +2737,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_activation(): class Net(gluon.HybridBlock): @@ -2864,7 +2758,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_reshape_activation_reshape_activation(): class Net(gluon.HybridBlock): @@ -2891,7 +2784,6 @@ def 
hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_activation_slice_activation(): class Net(gluon.HybridBlock): @@ -2918,7 +2810,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_reshape_activation_slice_activation(): class Net(gluon.HybridBlock): @@ -2947,7 +2838,6 @@ def hybrid_forward(self, F, x): check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_slice_activation_reshape_activation(): class Net(gluon.HybridBlock): @@ -2975,7 +2865,6 @@ def hybrid_forward(self, F, x): net = Net(act0, act1, shape, slice) check_layer_forward_withinput(net, x) -@with_seed() @pytest.mark.serial def test_np_shape_parameters(): class Foo(gluon.Block): @@ -2992,7 +2881,6 @@ def forward(self, x): foo.initialize() print(foo(z).shape) -@with_seed() def test_gluon_param_load(): net = mx.gluon.nn.Dense(10, in_units=10) net.initialize() @@ -3001,7 +2889,6 @@ def test_gluon_param_load(): net.load_parameters('test_gluon_param_load.params', cast_dtype=True) mx.nd.waitall() -@with_seed() def test_gluon_param_load_dtype_source(): net = mx.gluon.nn.Dense(10, in_units=10) net.initialize() @@ -3012,7 +2899,6 @@ def test_gluon_param_load_dtype_source(): assert net.weight.dtype == np.float16 mx.nd.waitall() -@with_seed() def test_squeeze_consistency(): class Foo(gluon.HybridBlock): def __init__(self, inplace, **kwargs): @@ -3042,7 +2928,6 @@ def __init__(self, **kwargs): assert bl.param is not bl3.param assert bl.param.init == bl3.param.init -@with_seed() def test_reqs_switching_training_inference(): class Foo(gluon.HybridBlock): def __init__(self, **kwargs): @@ -3125,7 +3010,6 @@ def test_DeformableConvolution(): y = net(x) y.backward() -@with_seed() def test_ModulatedDeformableConvolution(): """test of the deformable convolution layer with possible combinations of arguments, currently this layer only supports gpu @@ -3182,7 +3066,6 @@ def test_concatenate(): x.wait_to_read() x2.wait_to_read() -@with_seed() def test_identity(): model = nn.Identity() x = mx.nd.random.uniform(shape=(128, 33, 64)) diff --git a/tests/python/unittest/test_gluon_data.py b/tests/python/unittest/test_gluon_data.py index 52502fb1379f..59c67e2b44f7 100644 --- a/tests/python/unittest/test_gluon_data.py +++ b/tests/python/unittest/test_gluon_data.py @@ -24,7 +24,6 @@ import random from mxnet import gluon import platform -from common import with_seed from mxnet.gluon.data import DataLoader import mxnet.ndarray as nd from mxnet import context @@ -32,7 +31,6 @@ from mxnet.gluon.data.dataset import ArrayDataset import pytest -@with_seed() def test_array_dataset(): X = np.random.uniform(size=(10, 20)) Y = np.random.uniform(size=(10,)) @@ -64,7 +62,6 @@ def prepare_record(tmpdir_factory): return str(test_images.join('test.rec')) -@with_seed() def test_recordimage_dataset(prepare_record): recfile = prepare_record fn = lambda x, y : (x, y) @@ -75,7 +72,6 @@ def test_recordimage_dataset(prepare_record): assert x.shape[0] == 1 and x.shape[3] == 3 assert y.asscalar() == i -@with_seed() def test_recordimage_dataset_handle(prepare_record): recfile = prepare_record class TmpTransform(mx.gluon.HybridBlock): @@ -97,7 +93,6 @@ def _dataset_transform_first_fn(x): """Named transform function since lambda function cannot be pickled.""" return x -@with_seed() def test_recordimage_dataset_with_data_loader_multiworker(prepare_record): recfile = prepare_record dataset = gluon.data.vision.ImageRecordDataset(recfile) @@ 
-123,7 +118,6 @@ def test_recordimage_dataset_with_data_loader_multiworker(prepare_record): assert x.shape[0] == 1 and x.shape[3] == 3 assert y.asscalar() == i -@with_seed() def test_sampler(): seq_sampler = gluon.data.SequentialSampler(10) assert list(seq_sampler) == list(range(10)) @@ -136,7 +130,6 @@ def test_sampler(): rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep') assert sorted(sum(list(rand_batch_keep), [])) == list(range(10)) -@with_seed() def test_datasets(tmpdir): p = tmpdir.mkdir("test_datasets") assert len(gluon.data.vision.MNIST(root=str(p.join('mnist')))) == 60000 @@ -149,7 +142,6 @@ def test_datasets(tmpdir): assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), fine_label=True)) == 50000 assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), train=False)) == 10000 -@with_seed() def test_datasets_handles(tmpdir): p = tmpdir.mkdir("test_datasets_handles") assert len(gluon.data.vision.MNIST(root=str(p.join('mnist'))).__mx_handle__()) == 60000 @@ -162,13 +154,11 @@ def test_datasets_handles(tmpdir): assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), fine_label=True).__mx_handle__()) == 50000 assert len(gluon.data.vision.CIFAR100(root=str(p.join('cifar100')), train=False).__mx_handle__()) == 10000 -@with_seed() def test_image_folder_dataset(prepare_record): dataset = gluon.data.vision.ImageFolderDataset(os.path.dirname(prepare_record)) assert dataset.synsets == ['test_images'] assert len(dataset.items) == 16 -@with_seed() def test_image_folder_dataset_handle(prepare_record): dataset = gluon.data.vision.ImageFolderDataset(os.path.dirname(prepare_record)) hd = dataset.__mx_handle__() @@ -176,7 +166,6 @@ def test_image_folder_dataset_handle(prepare_record): assert (hd[1][0] == dataset[1][0]).asnumpy().all() assert hd[5][1] == dataset[5][1] -@with_seed() def test_image_list_dataset(prepare_record): root = os.path.join(os.path.dirname(prepare_record), 'test_images') imlist = os.listdir(root) @@ -200,7 +189,6 @@ def test_image_list_dataset(prepare_record): assert len(img.shape) == 3 assert label == 0 -@with_seed() def test_image_list_dataset_handle(prepare_record): root = os.path.join(os.path.dirname(prepare_record), 'test_images') imlist = os.listdir(root) @@ -224,7 +212,6 @@ def test_image_list_dataset_handle(prepare_record): assert len(img.shape) == 3 assert label == 0 -@with_seed() @pytest.mark.garbage_expected def test_list_dataset(): for num_worker in range(0, 3): @@ -239,7 +226,6 @@ def __len__(self): def __getitem__(self, key): return mx.nd.full((10,), key) -@with_seed() @pytest.mark.garbage_expected def test_multi_worker(): data = _Dataset() @@ -249,7 +235,6 @@ def test_multi_worker(): assert (batch.asnumpy() == i).all() -@with_seed() def test_multi_worker_shape(): for thread_pool in [True, False]: batch_size = 1024 @@ -335,7 +320,6 @@ def _batchify(data): nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)), nd.array(y_lens, ctx=context.Context('cpu_shared', 0))) -@with_seed() def test_multi_worker_forked_data_loader(): data = _Dummy(False) loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2) @@ -349,7 +333,6 @@ def test_multi_worker_forked_data_loader(): for i, data in enumerate(loader): pass -@with_seed() def test_multi_worker_dataloader_release_pool(): # will trigger too many open file if pool is not released properly if os.name == 'nt': @@ -364,7 +347,6 @@ def test_multi_worker_dataloader_release_pool(): del the_iter del D -@with_seed() def 
test_dataloader_context(): X = np.random.uniform(size=(10, 20)) dataset = gluon.data.ArrayDataset(X) diff --git a/tests/python/unittest/test_gluon_data_vision.py b/tests/python/unittest/test_gluon_data_vision.py index ec0ef40436e8..a93b997cd2f2 100644 --- a/tests/python/unittest/test_gluon_data_vision.py +++ b/tests/python/unittest/test_gluon_data_vision.py @@ -25,13 +25,11 @@ from mxnet.gluon.data.vision import transforms from mxnet import image from mxnet.test_utils import * -from common import assertRaises, with_seed, \ - xfail_when_nonstandard_decimal_separator +from common import assertRaises, xfail_when_nonstandard_decimal_separator import numpy as np import pytest -@with_seed() def test_to_tensor(): # 3D Input data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8) @@ -60,7 +58,6 @@ def test_to_tensor(): assert same(out_nd.asnumpy(), np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1))) -@with_seed() def test_normalize(): # 3D Input data_in_3d = nd.random.uniform(0, 1, (3, 300, 300)) @@ -94,7 +91,6 @@ def test_normalize(): assertRaises(MXNetError, normalize_transformer, invalid_data_in) -@with_seed() def test_resize(): def _test_resize_with_diff_type(dtype): # test normal case @@ -131,7 +127,6 @@ def _test_resize_with_diff_type(dtype): _test_resize_with_diff_type(dtype) -@with_seed() def test_crop_resize(): def _test_crop_resize_with_diff_type(dtype): # test normal case @@ -193,7 +188,6 @@ def test_crop_backward(test_nd_arr, TestCase): test_crop_backward(data_in, test_case) -@with_seed() def test_flip_left_right(): for width in range(3, 301, 7): data_in = np.random.uniform(0, 255, (300, width, 3)).astype(dtype=np.uint8) @@ -202,7 +196,6 @@ def test_flip_left_right(): assert_almost_equal(flip_in, data_trans.asnumpy()) -@with_seed() def test_flip_top_bottom(): for height in range(3, 301, 7): data_in = np.random.uniform(0, 255, (height, 300, 3)).astype(dtype=np.uint8) @@ -211,7 +204,6 @@ def test_flip_top_bottom(): assert_almost_equal(flip_in, data_trans.asnumpy()) -@with_seed() def test_transformer(): from mxnet.gluon.data.vision import transforms @@ -234,19 +226,16 @@ def test_transformer(): transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read() -@with_seed() def test_random_crop(): x = mx.nd.ones((245, 480, 3), dtype='uint8') y = mx.nd.image.random_crop(x, width=100, height=100) assert y.shape == (100, 100, 3) -@with_seed() def test_random_resize_crop(): x = mx.nd.ones((245, 480, 3), dtype='uint8') y = mx.nd.image.random_resized_crop(x, width=100, height=100) assert y.shape == (100, 100, 3) -@with_seed() def test_hybrid_transformer(): from mxnet.gluon.data.vision import transforms @@ -267,7 +256,6 @@ def test_hybrid_transformer(): transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read() -@with_seed() def test_rotate(): transformer = transforms.Rotate(10.) assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8')) @@ -302,7 +290,6 @@ def test_rotate(): assert_almost_equal(ans, expected_result, atol=1e-6) -@with_seed() def test_random_rotation(): # test exceptions for probability input outside of [0,1] assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=1.1) @@ -323,7 +310,6 @@ def test_random_rotation(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_rotate(): transformer = transforms.Rotate(10.) 
assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8')) @@ -358,7 +344,6 @@ def test_rotate(): assert_almost_equal(ans, expected_result, atol=1e-6) -@with_seed() def test_random_rotation(): # test exceptions for probability input outside of [0,1] assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=1.1) @@ -378,7 +363,6 @@ def test_random_rotation(): assert_almost_equal(data, transformer(data)) -@with_seed() def test_random_transforms(): from mxnet.gluon.data.vision import transforms @@ -397,7 +381,6 @@ def transform_fn(x): assert counter == pytest.approx(5000, 1e-1) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_random_gray(): from mxnet.gluon.data.vision import transforms @@ -424,7 +407,6 @@ def test_random_gray(): num_apply += 1 assert_almost_equal(num_apply/float(iteration), 0.5, 0.1) -@with_seed() def test_bbox_random_flip(): from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxRandomFlipLeftRight @@ -440,7 +422,6 @@ def test_bbox_random_flip(): num_apply += 1 assert_almost_equal(np.array([num_apply])/float(iteration), 0.5, 0.5) -@with_seed() def test_bbox_crop(): from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxCrop diff --git a/tests/python/unittest/test_gluon_model_zoo.py b/tests/python/unittest/test_gluon_model_zoo.py index eb9082a2bc87..cc2623aee22d 100644 --- a/tests/python/unittest/test_gluon_model_zoo.py +++ b/tests/python/unittest/test_gluon_model_zoo.py @@ -19,7 +19,6 @@ import mxnet as mx from mxnet.gluon.model_zoo.vision import get_model import sys -from common import with_seed import multiprocessing import pytest @@ -28,7 +27,6 @@ def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) -@with_seed() @pytest.mark.parametrize('model_name', [ 'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1', 'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2', @@ -56,7 +54,6 @@ def parallel_download(model_name): model = get_model(model_name, pretrained=True, root='./parallel_download') print(type(model)) -@with_seed() @pytest.mark.skip(reason='MXNet is not yet safe for forking. 
Tracked in #17782.') def test_parallel_download(): processes = [] diff --git a/tests/python/unittest/test_gluon_probability_v1.py b/tests/python/unittest/test_gluon_probability_v1.py index fe1a2c7d4fd0..b0f52b164862 100644 --- a/tests/python/unittest/test_gluon_probability_v1.py +++ b/tests/python/unittest/test_gluon_probability_v1.py @@ -26,7 +26,6 @@ from mxnet.gluon import HybridBlock from mxnet.test_utils import use_np, assert_almost_equal -from common import with_seed from numpy.testing import assert_array_equal import pytest import scipy.stats as ss @@ -72,7 +71,6 @@ def test_mgp_getF_v1(): getF(sym.ones((2, 2)), nd.ones((2, 2))) -@with_seed() @use_np def test_gluon_uniform_v1(): class TestUniform(HybridBlock): @@ -143,7 +141,6 @@ def hybrid_forward(self, F, low, high, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_normal_v1(): class TestNormal(HybridBlock): @@ -213,7 +210,6 @@ def hybrid_forward(self, F, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_laplace_v1(): class TestLaplace(HybridBlock): @@ -283,7 +279,6 @@ def hybrid_forward(self, F, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_cauchy_v1(): class TestCauchy(HybridBlock): @@ -365,7 +360,6 @@ def hybrid_forward(self, F, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_half_cauchy_v1(): class TestHalfCauchy(HybridBlock): @@ -427,7 +421,6 @@ def hybrid_forward(self, F, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_poisson_v1(): class TestPoisson(HybridBlock): @@ -462,7 +455,6 @@ def hybrid_forward(self, F, rate, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_geometric_v1(): class TestGeometric(HybridBlock): @@ -522,7 +514,6 @@ def hybrid_forward(self, F, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_negative_binomial_v1(): class TestNegativeBinomial(HybridBlock): @@ -576,7 +567,6 @@ def hybrid_forward(self, F, n, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_exponential_v1(): class TestExponential(HybridBlock): @@ -637,7 +627,6 @@ def hybrid_forward(self, F, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_weibull_v1(): class TestWeibull(HybridBlock): @@ -707,7 +696,6 @@ def hybrid_forward(self, F, concentration, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_pareto_v1(): class TestPareto(HybridBlock): @@ -776,7 +764,6 @@ def hybrid_forward(self, F, alpha, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_gamma_v1(): class TestGamma(HybridBlock): @@ -825,7 +812,6 @@ def hybrid_forward(self, F, shape, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_dirichlet_v1(): class TestDirichlet(HybridBlock): @@ -895,7 +881,6 @@ def hybrid_forward(self, F, alpha, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_beta_v1(): class TestBeta(HybridBlock): @@ -943,7 +928,6 @@ def hybrid_forward(self, F, alpha, beta, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_fisher_snedecor_v1(): class TestFisherSnedecor(HybridBlock): @@ -989,7 +973,6 @@ def hybrid_forward(self, F, df1, df2, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_student_t_v1(): class TestT(HybridBlock): @@ -1039,7 +1022,6 @@ def hybrid_forward(self, F, df, loc, 
scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_gumbel_v1(): class TestGumbel(HybridBlock): @@ -1109,7 +1091,6 @@ def hybrid_forward(self, F, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_multinomial_v1(): class TestMultinomial(HybridBlock): @@ -1201,7 +1182,6 @@ def one_hot(a, num_classes): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_binomial_v1(): class TestBinomial(HybridBlock): @@ -1280,7 +1260,6 @@ def hybrid_forward(self, F, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_bernoulli_v1(): class TestBernoulli(HybridBlock): @@ -1341,7 +1320,6 @@ def hybrid_forward(self, F, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_relaxed_bernoulli_v1(): class TestRelaxedBernoulli(HybridBlock): @@ -1392,7 +1370,6 @@ def prob_to_logit(prob): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_categorical_v1(): class TestCategorical(HybridBlock): @@ -1502,7 +1479,6 @@ def hybrid_forward(self, F, params, *args): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_one_hot_categorical_v1(): def one_hot(a, num_classes): @@ -1584,7 +1560,6 @@ def hybrid_forward(self, F, params, *args): desired_shape + (event_shape,) -@with_seed() @use_np def test_relaxed_one_hot_categorical_v1(): class TestRelaxedOneHotCategorical(HybridBlock): @@ -1653,7 +1628,6 @@ def hybrid_forward(self, F, params, *args): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_mvn_v1(): class TestMVN(HybridBlock): @@ -1772,7 +1746,6 @@ def _stable_inv(cov): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_half_normal_v1(): class TestHalfNormal(HybridBlock): @@ -1834,7 +1807,6 @@ def hybrid_forward(self, F, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_affine_transform_v1(): r""" @@ -1898,7 +1870,6 @@ def hybrid_forward(self, F, loc, scale, *args): assert mx_out.shape == expected_shape -@with_seed() @use_np def test_compose_transform_v1(): class TestComposeTransform(HybridBlock): @@ -2010,7 +1981,6 @@ def hybrid_forward(self, F, logit, *args): assert mx_out.shape == batch_shape -@with_seed() @use_np def test_gluon_kl_v1(): def _test_zero_kl(p, shape): @@ -2204,7 +2174,6 @@ def alpha(): return np.random.uniform( @pytest.mark.garbage_expected -@with_seed() @use_np def test_gluon_stochastic_block_v1(): class dummyBlock(StochasticBlock): @@ -2235,7 +2204,6 @@ def hybrid_forward(self, F, loc, scale): assert l2_norm.shape == shape[:-1] -@with_seed() @use_np def test_gluon_stochastic_block_exception_v1(): class problemBlock(StochasticBlock): @@ -2259,7 +2227,6 @@ def hybrid_forward(self, F, loc, scale): @pytest.mark.garbage_expected -@with_seed() @use_np def test_gluon_stochastic_sequential_v1(): class normalBlock(HybridBlock): @@ -2314,7 +2281,6 @@ def hybrid_forward(self, F, x): mx_out = net(initial_value).asnumpy() -@with_seed() @use_np def test_gluon_constraint_v1(): class TestConstraint(HybridBlock): @@ -2384,7 +2350,6 @@ def hybrid_forward(self, F, *params): assert_almost_equal(mx_out, test_sample.asnumpy()) -@with_seed() @use_np def test_gluon_domain_map_v1(): class TestDomainMap(HybridBlock): diff --git a/tests/python/unittest/test_gluon_probability_v2.py b/tests/python/unittest/test_gluon_probability_v2.py index 50eaa5d294ae..6d3e31412de0 100644 --- a/tests/python/unittest/test_gluon_probability_v2.py +++ 
b/tests/python/unittest/test_gluon_probability_v2.py @@ -26,7 +26,6 @@ from mxnet.gluon import HybridBlock from mxnet.test_utils import use_np, assert_almost_equal -from common import with_seed from numpy.testing import assert_array_equal import pytest import scipy.stats as ss @@ -72,7 +71,6 @@ def test_mgp_getF(): getF(sym.ones((2, 2)), nd.ones((2, 2))) -@with_seed() @use_np def test_gluon_uniform(): class TestUniform(HybridBlock): @@ -143,7 +141,6 @@ def forward(self, low, high, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_normal(): class TestNormal(HybridBlock): @@ -213,7 +210,6 @@ def forward(self, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_laplace(): class TestLaplace(HybridBlock): @@ -283,7 +279,6 @@ def forward(self, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_cauchy(): class TestCauchy(HybridBlock): @@ -365,7 +360,6 @@ def forward(self, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_half_cauchy(): class TestHalfCauchy(HybridBlock): @@ -427,7 +421,6 @@ def forward(self, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_poisson(): class TestPoisson(HybridBlock): @@ -462,7 +455,6 @@ def forward(self, rate, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_geometric(): class TestGeometric(HybridBlock): @@ -522,7 +514,6 @@ def forward(self, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_negative_binomial(): class TestNegativeBinomial(HybridBlock): @@ -576,7 +567,6 @@ def forward(self, n, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_exponential(): class TestExponential(HybridBlock): @@ -637,7 +627,6 @@ def forward(self, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_weibull(): class TestWeibull(HybridBlock): @@ -707,7 +696,6 @@ def forward(self, concentration, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_pareto(): class TestPareto(HybridBlock): @@ -776,7 +764,6 @@ def forward(self, alpha, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_gamma(): class TestGamma(HybridBlock): @@ -825,7 +812,6 @@ def forward(self, shape, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_dirichlet(): class TestDirichlet(HybridBlock): @@ -895,7 +881,6 @@ def forward(self, alpha, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_beta(): class TestBeta(HybridBlock): @@ -943,7 +928,6 @@ def forward(self, alpha, beta, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_fisher_snedecor(): class TestFisherSnedecor(HybridBlock): @@ -989,7 +973,6 @@ def forward(self, df1, df2, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_student_t(): class TestT(HybridBlock): @@ -1039,7 +1022,6 @@ def forward(self, df, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_gumbel(): class TestGumbel(HybridBlock): @@ -1109,7 +1091,6 @@ def forward(self, loc, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_multinomial(): class TestMultinomial(HybridBlock): @@ -1201,7 +1182,6 @@ def one_hot(a, num_classes): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_binomial(): class TestBinomial(HybridBlock): @@ -1280,7 +1260,6 
@@ def forward(self, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_bernoulli(): class TestBernoulli(HybridBlock): @@ -1341,7 +1320,6 @@ def forward(self, params, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_relaxed_bernoulli(): class TestRelaxedBernoulli(HybridBlock): @@ -1392,7 +1370,6 @@ def prob_to_logit(prob): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_categorical(): class TestCategorical(HybridBlock): @@ -1502,7 +1479,6 @@ def forward(self, params, *args): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_one_hot_categorical(): def one_hot(a, num_classes): @@ -1584,7 +1560,6 @@ def forward(self, params, *args): desired_shape + (event_shape,) -@with_seed() @use_np def test_relaxed_one_hot_categorical(): class TestRelaxedOneHotCategorical(HybridBlock): @@ -1653,7 +1628,6 @@ def forward(self, params, *args): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_gluon_mvn(): class TestMVN(HybridBlock): @@ -1772,7 +1746,6 @@ def _stable_inv(cov): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_gluon_half_normal(): class TestHalfNormal(HybridBlock): @@ -1834,7 +1807,6 @@ def forward(self, scale, *args): rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_affine_transform(): r""" @@ -1898,7 +1870,6 @@ def forward(self, loc, scale, *args): assert mx_out.shape == expected_shape -@with_seed() @use_np def test_compose_transform(): class TestComposeTransform(HybridBlock): @@ -2010,7 +1981,6 @@ def forward(self, logit, *args): assert mx_out.shape == batch_shape -@with_seed() @use_np def test_gluon_kl(): def _test_zero_kl(p, shape): @@ -2205,7 +2175,6 @@ def alpha(): return np.random.uniform( @pytest.mark.garbage_expected -@with_seed() @use_np def test_gluon_stochastic_block(): class dummyBlock(StochasticBlock): @@ -2238,7 +2207,6 @@ def forward(self, loc, scale): net.export('dummyBlock', epoch=0) -@with_seed() @use_np def test_gluon_stochastic_block_exception(): class problemBlock(StochasticBlock): @@ -2262,7 +2230,6 @@ def forward(self, loc, scale): @pytest.mark.garbage_expected -@with_seed() @use_np def test_gluon_stochastic_sequential(): class normalBlock(HybridBlock): @@ -2317,7 +2284,6 @@ def forward(self, x): mx_out = net(initial_value).asnumpy() -@with_seed() @use_np def test_gluon_domain_map(): class TestDomainMap(HybridBlock): diff --git a/tests/python/unittest/test_gluon_rnn.py b/tests/python/unittest/test_gluon_rnn.py index d60829632bea..49921413ad5f 100644 --- a/tests/python/unittest/test_gluon_rnn.py +++ b/tests/python/unittest/test_gluon_rnn.py @@ -24,7 +24,7 @@ from numpy.testing import assert_allclose import pytest from mxnet.test_utils import almost_equal, assert_almost_equal, default_context -from common import assert_raises_cudnn_not_satisfied, with_seed, retry +from common import assert_raises_cudnn_not_satisfied, retry def check_rnn_states(fused_states, stack_states, num_layers, bidirectional=False, is_lstm=True): @@ -76,7 +76,6 @@ def test_lstm(): assert outs == [(10, 100), (10, 100), (10, 100)] -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='7.2.1') @pytest.mark.serial def test_lstmp(): @@ -338,7 +337,6 @@ def test_bidirectional(): @assert_raises_cudnn_not_satisfied(min_version='5.1.10') -@with_seed() @pytest.mark.serial def test_layer_bidirectional(): class RefBiLSTM(gluon.Block): @@ -760,7 +758,6 @@ def check_rnn_bidir_layer_gradients(mode, input_size, hidden_size, num_layers, l 
check_rnn_consistency(fused_layer, stack_layer, loss, input_size, hidden_size, bidirectional=True) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_fused_lstm_layer(): input_sizes = [8] @@ -772,7 +769,6 @@ def test_fused_lstm_layer(): check_rnn_bidir_layer_gradients('lstm', input_size, hidden_size, num_layers, loss) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_fused_gru_layer(): input_sizes = [8] @@ -784,7 +780,6 @@ def test_fused_gru_layer(): check_rnn_bidir_layer_gradients('gru', input_size, hidden_size, num_layers, loss) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_fused_rnnrelu_layer(): input_sizes = [8] @@ -796,7 +791,6 @@ def test_fused_rnnrelu_layer(): check_rnn_bidir_layer_gradients('rnn_relu', input_size, hidden_size, num_layers, loss) -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_fused_rnntanh_layer(): input_sizes = [8] @@ -937,7 +931,6 @@ def check_rnn_forward(layer, inputs): mx.nd.waitall() -@with_seed() def test_rnn_cells(): check_rnn_forward(gluon.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)), mx.nd.ones((8, 3, 5, 7))) @@ -953,7 +946,6 @@ def test_rnn_cells(): check_rnn_forward(net, mx.nd.ones((8, 3, 5, 7))) -@with_seed() def test_convrnn(): cell = gluon.rnn.Conv1DRNNCell((10, 50), 100, 3, 3) check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48)) @@ -965,7 +957,6 @@ def test_convrnn(): check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48)) -@with_seed() def test_convlstm(): cell = gluon.rnn.Conv1DLSTMCell((10, 50), 100, 3, 3) check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48)) @@ -977,7 +968,6 @@ def test_convlstm(): check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48)) -@with_seed() def test_convgru(): cell = gluon.rnn.Conv1DGRUCell((10, 50), 100, 3, 3) check_rnn_cell(cell, in_shape=(1, 10, 50), out_shape=(1, 100, 48)) @@ -989,7 +979,6 @@ def test_convgru(): check_rnn_cell(cell, in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48)) -@with_seed() def test_conv_fill_shape(): cell = gluon.rnn.Conv1DLSTMCell((0, 7), 10, (3,), (3,)) cell.hybridize() @@ -997,7 +986,6 @@ def test_conv_fill_shape(): assert cell.i2h_weight.shape[1] == 5, cell.i2h_weight.shape[1] -@with_seed() def test_lstmp(): nhid = 100 nproj = 64 @@ -1014,7 +1002,6 @@ def test_lstmp(): assert outs == [(10, nproj), (10, nproj), (10, nproj)] -@with_seed() def test_vardrop(): def check_vardrop(drop_inputs, drop_states, drop_outputs): cell = gluon.rnn.VariationalDropoutCell(mx.gluon.rnn.RNNCell(100), @@ -1049,7 +1036,6 @@ def check_vardrop(drop_inputs, drop_states, drop_outputs): check_vardrop(0.5, 0, 0.5) -@with_seed() @pytest.mark.parametrize('cell_type,num_states', [ (gluon.rnn.RNNCell, 1), (gluon.rnn.LSTMCell, 2), diff --git a/tests/python/unittest/test_gluon_trainer.py b/tests/python/unittest/test_gluon_trainer.py index 983b5a54ceb8..4016da7283cf 100644 --- a/tests/python/unittest/test_gluon_trainer.py +++ b/tests/python/unittest/test_gluon_trainer.py @@ -22,7 +22,7 @@ from mxnet import gluon from mxnet.gluon import nn from mxnet.test_utils import assert_almost_equal -from common import with_seed, assertRaises, xfail_when_nonstandard_decimal_separator +from common import assertRaises, xfail_when_nonstandard_decimal_separator from copy import deepcopy import pytest @@ -31,7 +31,6 @@ def dict_equ(a, b): for k in a: assert (a[k].asnumpy() == b[k].asnumpy()).all() -@with_seed() def 
test_multi_trainer(): x = gluon.Parameter('x', shape=(10,), stype='row_sparse') x.initialize() @@ -46,7 +45,6 @@ def test_multi_trainer(): # multiple trainers for a sparse Parameter is not allowed trainer1 = gluon.Trainer([x], 'sgd') -@with_seed() def test_trainer_with_sparse_grad_on_single_context(): x = gluon.Parameter('x', shape=(10,), grad_stype='row_sparse') x.initialize(ctx=[mx.cpu(0)], init='zeros') @@ -61,7 +59,6 @@ def test_trainer_with_sparse_grad_on_single_context(): assert trainer._kvstore is None # No kvstore created for single-device training assert (x.data(mx.cpu(0)).asnumpy() == -1).all() -@with_seed() def test_trainer_with_teststore(): x = gluon.Parameter('x', shape=(10,)) x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros') @@ -80,7 +77,6 @@ def test_trainer_with_teststore(): invalid_trainer = gluon.Trainer([x], 'sgd', kvstore=kv, update_on_kvstore=True) pytest.raises(ValueError, invalid_trainer._init_kvstore) -@with_seed() def test_trainer(): x = gluon.Parameter('x', shape=(10,)) x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros') @@ -132,7 +128,6 @@ def test_trainer(): assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy() -@with_seed() def test_trainer_save_load(): previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1") os.putenv('MXNET_UPDATE_ON_KVSTORE', '1') @@ -153,7 +148,6 @@ def test_trainer_save_load(): assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2 os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore) -@with_seed() def test_trainer_sparse_save_load(): x = gluon.Parameter('x', shape=(10, 1), lr_mult=1.0, stype='row_sparse', grad_stype='row_sparse') @@ -172,7 +166,6 @@ def test_trainer_sparse_save_load(): # check if parameter dict is correctly associated with optimizer after load_state assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2 -@with_seed() def test_trainer_multi_layer_init(): class Net(gluon.Block): def __init__(self, **kwargs): @@ -217,7 +210,6 @@ def check_init(ctxes): check_init([mx.cpu(1)]) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_trainer_reset_kv(): def check_trainer_reset_kv(kv): x = gluon.Parameter('x', shape=(10,), lr_mult=1.0) @@ -252,7 +244,6 @@ def check_trainer_reset_kv(kv): check_trainer_reset_kv(kv) @xfail_when_nonstandard_decimal_separator -@with_seed() def test_trainer_sparse_kv(): def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected): x = mx.gluon.Parameter('x', shape=(10,1), lr_mult=1.0, stype=stype, grad_stype=grad_stype) @@ -289,7 +280,6 @@ def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected): check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', None, True) check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', False, ValueError) -@with_seed() def test_trainer_lr_sched(): x = gluon.Parameter('x', shape=(10,)) x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros') @@ -329,7 +319,6 @@ def test_trainer_lr_sched(): lr *= factor mx.nd.waitall() -@with_seed() def test_gluon_trainer_param_order(): net = mx.gluon.nn.Sequential() # layers may be added in a random order for all workers diff --git a/tests/python/unittest/test_higher_order_grad.py b/tests/python/unittest/test_higher_order_grad.py index ae3c33a4d9b7..ccdd650d604c 100644 --- a/tests/python/unittest/test_higher_order_grad.py +++ b/tests/python/unittest/test_higher_order_grad.py @@ -22,14 +22,13 @@ from operator import mul import random -from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import 
xfail_when_nonstandard_decimal_separator import mxnet from mxnet import nd, autograd, gluon from mxnet.test_utils import ( assert_almost_equal, random_arrays, random_uniform_arrays, rand_shape_nd, same) -@with_seed() def test_sin(): def sin(x): return nd.sin(x) @@ -49,7 +48,6 @@ def grad_grad_grad_op(x): [grad_grad_op, grad_grad_grad_op], [2, 3]) -@with_seed() def test_cos(): def cos(x): return nd.cos(x) @@ -69,7 +67,6 @@ def grad_grad_grad_op(x): [grad_grad_op, grad_grad_grad_op], [2, 3]) -@with_seed() def test_tan(): def tan(x): return nd.tan(x) @@ -86,7 +83,6 @@ def grad_grad_op(x): check_second_order_unary(array, tan, grad_grad_op) -@with_seed() def test_sinh(): def sinh(x): return nd.sinh(x) @@ -100,7 +96,6 @@ def grad_grad_op(x): check_second_order_unary(array, sinh, grad_grad_op) -@with_seed() def test_cosh(): def cosh(x): return nd.cosh(x) @@ -114,7 +109,6 @@ def grad_grad_op(x): check_second_order_unary(array, cosh, grad_grad_op) -@with_seed() def test_tanh(): def tanh(x): return nd.tanh(x) @@ -133,7 +127,6 @@ def grad_grad_op(x): array, tanh, grad_grad_op, rtol=1e-6, atol=1e-5) -@with_seed() def test_arcsin(): def arcsin(x): return nd.arcsin(x) @@ -148,7 +141,6 @@ def grad_grad_op(x): check_second_order_unary(array, arcsin, grad_grad_op) -@with_seed() def test_arccos(): def arccos(x): return nd.arccos(x) @@ -164,7 +156,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_arctan(): def arctan(x): return nd.arctan(x) @@ -181,7 +172,6 @@ def grad_grad_op(x): check_second_order_unary(array, arctan, grad_grad_op) -@with_seed() def test_arcsinh(): def arcsinh(x): return nd.arcsinh(x) @@ -195,7 +185,6 @@ def grad_grad_op(x): check_second_order_unary(array, arcsinh, grad_grad_op) -@with_seed() def test_arccosh(): def arccosh(x): return nd.arccosh(x) @@ -216,7 +205,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_arctanh(): def arctanh(x): return nd.arctanh(x) @@ -231,7 +219,6 @@ def grad_grad_op(x): check_second_order_unary(array, arctanh, grad_grad_op) -@with_seed() def test_radians(): def radians(x): return nd.radians(x) @@ -245,7 +232,6 @@ def grad_grad_op(x): check_second_order_unary(array, radians, grad_grad_op) -@with_seed() def test_relu(): def relu(x): return nd.relu(x) @@ -259,7 +245,6 @@ def grad_grad_op(x): check_second_order_unary(array, relu, grad_grad_op) -@with_seed() def test_log(): def log(x): return nd.log(x) @@ -279,7 +264,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_log2(): def log2(x): return nd.log2(x) @@ -294,7 +278,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_log10(): def log10(x): return nd.log10(x) @@ -309,7 +292,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_square(): def grad_grad_op(x): return nd.ones_like(x) * 2 @@ -320,7 +302,6 @@ def grad_grad_op(x): check_second_order_unary(array, nd.square, grad_grad_op) -@with_seed() def test_expm1(): def grad_grad_op(x): return nd.exp(x) @@ -331,7 +312,6 @@ def grad_grad_op(x): check_second_order_unary(array, nd.expm1, grad_grad_op) -@with_seed() def test_log1p(): def grad_grad_op(x): return -1/((1+x)**2) @@ -342,7 +322,6 @@ def grad_grad_op(x): check_second_order_unary(array, nd.log1p, grad_grad_op) -@with_seed() def test_reciprocal(): def reciprocal(x): return nd.reciprocal(x) @@ -356,7 +335,6 @@ def grad_grad_op(x): check_second_order_unary(array, reciprocal, grad_grad_op) -@with_seed() def test_abs(): 
def abs(x): return nd.abs(x) @@ -370,7 +348,6 @@ def grad_grad_op(x): check_second_order_unary(array, abs, grad_grad_op) -@with_seed() def test_clip(): def clip(x): a_min, a_max = sorted([random.random(), random.random()]) @@ -386,7 +363,6 @@ def grad_grad_op(x): check_second_order_unary(array, clip, grad_grad_op) -@with_seed() def test_dropout(): def dropout(x): return nd.Dropout(x) @@ -400,7 +376,6 @@ def grad_grad_op(x): check_second_order_unary(array, dropout, grad_grad_op) -@with_seed() def test_sigmoid(): def sigmoid(x): return nd.sigmoid(x) @@ -421,7 +396,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sqrt(): def sqrt(x): return nd.sqrt(x) @@ -441,7 +415,6 @@ def grad_grad_op(x): check_second_order_unary(array, sqrt, grad_grad_op) -@with_seed() def test_cbrt(): def cbrt(x): return nd.cbrt(x) @@ -462,7 +435,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_rsqrt(): def rsqrt(x): return nd.rsqrt(x) @@ -483,7 +455,6 @@ def grad_grad_op(x): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_rcbrt(): def rcbrt(x): return nd.rcbrt(x) @@ -622,7 +593,6 @@ def flatten2d_left(x): return x.reshape((s_0, s_1)) -@with_seed() def test_dense_backward_flatten(): print("2nd order gradient for Fully Connected, flatten=True") for x in NDArrayGenerator(4,2): @@ -666,7 +636,6 @@ def test_dense_backward_flatten(): assert x_grad_grad_check assert w_grad_grad_check -@with_seed() def test_dense_backward_no_flatten(): print("2nd order gradient for Fully Connected, flatten=False") for x in NDArrayGenerator(5,3): diff --git a/tests/python/unittest/test_image.py b/tests/python/unittest/test_image.py index d638946ea10a..2c61867d9c8f 100644 --- a/tests/python/unittest/test_image.py +++ b/tests/python/unittest/test_image.py @@ -20,7 +20,7 @@ import numpy as np import scipy.ndimage from mxnet.test_utils import * -from common import assertRaises, with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator import shutil import tempfile import unittest @@ -171,7 +171,6 @@ def test_scale_down(self): assert mx.image.scale_down((360, 1000), (480, 500)) == (360, 375) assert mx.image.scale_down((300, 400), (0, 0)) == (0, 0) - @with_seed() def test_resize_short(self): try: import cv2 @@ -193,7 +192,6 @@ def test_resize_short(self): mx_resized = mx.image.resize_short(mx_img, new_size, interp) assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3) - @with_seed() def test_imresize(self): try: import cv2 @@ -252,7 +250,6 @@ def test_imageiter(self): ] _test_imageiter_last_batch(imageiter_list, (2, 3, 224, 224)) - @with_seed() def test_copyMakeBorder(self): try: import cv2 @@ -277,7 +274,6 @@ def test_copyMakeBorder(self): mx.image.copyMakeBorder(mx_img, top, bot, left, right, type=type_val, values=val, out=out_img) assert_almost_equal(out_img.asnumpy(), cv_border) - @with_seed() def test_augmenters(self): # ColorNormalizeAug mean = np.random.rand(3) * 255 @@ -352,7 +348,6 @@ def test_det_augmenters(self): for batch in det_iter: pass - @with_seed() def test_random_size_crop(self): # test aspect ratio within bounds width = np.random.randint(100, 500) @@ -367,7 +362,6 @@ def test_random_size_crop(self): 'ratio of new width and height out of the bound {}/{}={}'.format(new_w, new_h, float(new_w)/new_h) @xfail_when_nonstandard_decimal_separator - @with_seed() def test_imrotate(self): # test correctness xlin = np.expand_dims(np.linspace(0, 0.5, 30), axis=1) @@
-410,13 +404,15 @@ def test_imrotate(self): img_in = mx.nd.random.uniform(0, 1, (5, 3, 30, 60), dtype=np.float32) nd_rots = mx.nd.array([1, 2, 3, 4, 5], dtype=np.float32) args={'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': True} - self.assertRaises(ValueError, mx.image.imrotate, **args) + with pytest.raises(ValueError): + mx.image.imrotate(**args) # single image exception - zoom_in=zoom_out=True img_in = mx.nd.random.uniform(0, 1, (3, 30, 60), dtype=np.float32) nd_rots = 11 args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': True} - self.assertRaises(ValueError, mx.image.imrotate, **args) + with pytest.raises(ValueError): + mx.image.imrotate(**args) # batch of images with scalar rotation img_in = mx.nd.stack(nd_img, nd_img, nd_img) @@ -431,9 +427,9 @@ def test_imrotate(self): img_in = mx.nd.random.uniform(0, 1, (3, 30, 60), dtype=np.float32) nd_rots = mx.nd.array([1, 2, 3, 4, 5], dtype=np.float32) args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': False} - self.assertRaises(TypeError, mx.image.imrotate, **args) + with pytest.raises(TypeError): + mx.image.imrotate(**args) - @with_seed() def test_random_rotate(self): angle_limits = [-5., 5.] src_single_image = mx.nd.random.uniform(0, 1, (3, 30, 60), diff --git a/tests/python/unittest/test_infer_type.py b/tests/python/unittest/test_infer_type.py index f103e2957819..5c462a727e25 100644 --- a/tests/python/unittest/test_infer_type.py +++ b/tests/python/unittest/test_infer_type.py @@ -18,11 +18,10 @@ # pylint: skip-file import mxnet as mx import numpy as np -from common import models, with_seed +from common import models from mxnet import autograd from mxnet.test_utils import assert_almost_equal -@with_seed() def test_infer_multiout_op(): data = mx.nd.arange(16, dtype=np.float64).reshape((4, 4)) data.attach_grad() @@ -33,7 +32,6 @@ def test_infer_multiout_op(): assert data.grad.dtype == np.float64 mx.nd.waitall() -@with_seed() def test_infer_multiout_op2(): def test_func(a): q, l = mx.nd.linalg.gelqf(a) diff --git a/tests/python/unittest/test_kvstore.py b/tests/python/unittest/test_kvstore.py index 721db6f10917..61ae409babf8 100644 --- a/tests/python/unittest/test_kvstore.py +++ b/tests/python/unittest/test_kvstore.py @@ -20,7 +20,7 @@ import numpy as np import unittest from mxnet.test_utils import rand_ndarray, assert_almost_equal -from common import with_seed, assertRaises +from common import assertRaises from mxnet.base import py_str, MXNetError import pytest @@ -52,7 +52,6 @@ def check_diff_to_scalar(A, x): assert(np.sum(np.abs((A - x).asnumpy())) == 0) -@with_seed() def test_single_kv_pair(): """single key-value pair push & pull""" def check_single_kv_pair(kv, key, stype): @@ -66,7 +65,6 @@ def check_single_kv_pair(kv, key, stype): check_single_kv_pair(init_kv(), 3, stype) check_single_kv_pair(init_kv_with_str(), 'a', stype) -@with_seed() def test_row_sparse_pull(): kv = init_kv_with_str('row_sparse') kv.init('e', mx.nd.ones(shape).tostype('row_sparse')) @@ -95,7 +93,6 @@ def check_row_sparse_pull(kv, count): check_row_sparse_pull(kv, 1) check_row_sparse_pull(kv, 4) -@with_seed() def test_init(): """test init""" def check_init(kv, key): @@ -107,7 +104,6 @@ def check_init(kv, key): check_init(mx.kv.create(), 3) check_init(mx.kv.create(), 'a') -@with_seed() def test_pull(): """test pull""" def check_pull(kv): @@ -124,7 +120,6 @@ def check_pull(kv): check_pull(mx.kv.create('device')) check_pull(mx.kv.create()) -@with_seed() def test_list_kv_pair(): """list 
key-value pair push & pull""" def check_list_kv_pair(kv, key, stype): @@ -141,7 +136,6 @@ def check_list_kv_pair(kv, key, stype): @pytest.mark.skip(reason='Skipped due to segfault. Tracked in #18098') -@with_seed() def test_aggregator(): """aggregate value on multiple devices""" @@ -176,7 +170,6 @@ def check_aggregator(kv, key, key_list, stype): check_aggregator(init_kv_with_str(), 'a', str_keys, stype) -@with_seed() @pytest.mark.skip(reason='Skipped due to segfault. Tracked in #18098') def test_sparse_aggregator(): """aggregate sparse ndarray on multiple devices""" @@ -238,7 +231,6 @@ def str_updater(key, recv, local): assert(isinstance(key, str)) local += recv -@with_seed() def test_updater(dev='cpu'): """updater""" @@ -281,13 +273,11 @@ def check_updater(kv, key, key_list, stype): str_kv._set_updater(str_updater) check_updater(str_kv, 'a', str_keys, stype) -@with_seed() def test_get_type(): kvtype = 'local_allreduce_cpu' kv = mx.kv.create(kvtype) assert kv.type == kvtype -@with_seed() def test_invalid_pull(): def check_ignored_pull_single(kv, key): dns_val = (mx.nd.ones(shape) * 2) diff --git a/tests/python/unittest/test_kvstore_custom.py b/tests/python/unittest/test_kvstore_custom.py index ab3e8a9681c4..221e5f736159 100644 --- a/tests/python/unittest/test_kvstore_custom.py +++ b/tests/python/unittest/test_kvstore_custom.py @@ -20,7 +20,7 @@ import numpy as np import unittest from mxnet.test_utils import rand_ndarray, assert_almost_equal -from common import with_seed, assertRaises +from common import assertRaises from mxnet.base import py_str, MXNetError shape = (4, 4) @@ -34,7 +34,6 @@ def check_diff_to_scalar(A, x): def init_kv(name='device'): return mx.kv.create(name) -@with_seed() def test_broadcast_single_kv_pair(): """single key-value pair push & pull""" def check_single_kv_pair(kv, key): @@ -54,7 +53,6 @@ def check_single_kv_pair(kv, key): check_single_kv_pair(init_kv(name), 3) check_single_kv_pair(init_kv(name), 'a') -@with_seed() def test_broadcast_list_kv_pair(): """list key-value pair push & pull""" def check_list_kv_pair(kv, key): @@ -73,7 +71,6 @@ def check_list_kv_pair(kv, key): check_list_kv_pair(init_kv(), keys) check_list_kv_pair(init_kv(), str_keys) -@with_seed() def test_pushpull_single_kv_pair(): """aggregate value on multiple devices""" def check_aggregator(kv, key, key_list=None): @@ -119,7 +116,6 @@ def check_aggregator(kv, key, key_list=None): check_aggregator(init_kv('teststore'), 3) check_aggregator(init_kv('teststore'), 'a') -@with_seed() def test_pushpull_list_kv_pair(): """aggregate value on multiple devices""" def check_aggregator(kv, key, key_list=None): @@ -155,7 +151,6 @@ def check_aggregator(kv, key, key_list=None): check_aggregator(init_kv('teststore'), 'a') -@with_seed() def test_custom_store(): kv = mx.kv.create('teststore') out = mx.nd.empty((1,)) @@ -172,13 +167,11 @@ def test_custom_store(): for arr in arr_list: check_diff_to_scalar(arr, 4) -@with_seed() def test_get_type_device(): kvtype = 'teststore' kv = mx.kv.create(kvtype) assert kv.type == kvtype -@with_seed() def test_set_optimizer(): def check_unsupported_methods(kv): assert not kv.is_capable('optimizer')
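The kvstore tests above all exercise the same init/push/pull cycle. A minimal standalone sketch of that cycle, mirroring the kvstore tutorial pattern (the key, shape and store type here are arbitrary choices for illustration):

    import mxnet as mx

    shape = (4, 4)
    kv = mx.kv.create('local')           # single-process key-value store
    kv.init(3, mx.nd.ones(shape))        # register key 3 with an initial value
    kv.push(3, mx.nd.ones(shape) * 8)    # push a new value for the key
    out = mx.nd.zeros(shape)
    kv.pull(3, out=out)                  # pull the stored value back out
    assert (out.asnumpy() == 8).all()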
diff --git a/tests/python/unittest/test_loss.py b/tests/python/unittest/test_loss.py index 3c9fb385c4a6..c2b15fdb783f 100644 --- a/tests/python/unittest/test_loss.py +++ b/tests/python/unittest/test_loss.py @@ -19,12 +19,11 @@ import numpy as np from mxnet import gluon, autograd from mxnet.test_utils import assert_almost_equal, default_context -from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator import unittest @xfail_when_nonstandard_decimal_separator -@with_seed() def test_loss_ndarray(): output = mx.nd.array([1, 2, 3, 4]) label = mx.nd.array([1, 3, 5, 7]) @@ -56,7 +55,6 @@ def test_loss_ndarray(): assert_almost_equal(L, np.array([ 1.06346405, 0.04858733]), rtol=1e-3, atol=1e-4) -@with_seed() def test_bce_equal_ce2(): N = 100 loss1 = gluon.loss.SigmoidBCELoss(from_sigmoid=True) @@ -77,7 +75,6 @@ def test_logistic_loss_equal_bce(): assert_almost_equal(loss_signed(data, 2 * label - 1), loss_bce(data, label), atol=1e-6) -@with_seed() def test_ctc_loss(): loss = gluon.loss.CTCLoss() l = loss(mx.nd.ones((2,20,4)), mx.nd.array([[1,0,-1,-1],[2,1,1,-1]])) @@ -105,7 +102,6 @@ def test_ctc_loss(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sdml_loss(): N = 5 # number of samples @@ -138,7 +134,6 @@ def test_sdml_loss(): avg_loss = loss.sum()/len(loss) assert(avg_loss < 0.05) -@with_seed() def test_cosine_loss(): # Generating samples input1 = mx.nd.random.randn(3, 2) diff --git a/tests/python/unittest/test_metric.py b/tests/python/unittest/test_metric.py index d66f4e97d708..c770dfb67375 100644 --- a/tests/python/unittest/test_metric.py +++ b/tests/python/unittest/test_metric.py @@ -22,7 +22,7 @@ from scipy.stats import pearsonr import json import math -from common import with_seed, xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator from copy import deepcopy def check_metric(metric, *args, **kwargs): diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index c3405e6258b1..40383e06ffb4 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -24,7 +24,7 @@ import random import functools import pytest -from common import with_seed, assertRaises, TemporaryDirectory +from common import assertRaises, TemporaryDirectory from mxnet.test_utils import almost_equal from mxnet.test_utils import assert_almost_equal, assert_exception from mxnet.test_utils import default_context @@ -74,7 +74,6 @@ def random_ndarray(dim): return data -@with_seed() def test_ndarray_setitem(): shape = (3, 4, 2) @@ -171,7 +170,6 @@ def test_ndarray_setitem(): assert same(dst.asnumpy(), np.array([1, 2, 0], dtype=dst.dtype).reshape(dst.shape)) -@with_seed() def test_ndarray_elementwise(): nrepeat = 10 maxdim = 4 @@ -189,14 +187,12 @@ def test_ndarray_elementwise(): check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm) -@with_seed() def test_ndarray_elementwisesum(): ones = mx.nd.ones((10,), dtype=np.int32) res = mx.nd.ElementWiseSum(ones, ones*2, ones*4, ones*8) assert same(res.asnumpy(), ones.asnumpy()*15) -@with_seed() def test_ndarray_negate(): npy = np.random.uniform(-10, 10, (2,3,4)) arr = mx.nd.array(npy) @@ -209,7 +205,6 @@ def test_ndarray_negate(): assert_almost_equal(npy, arr.asnumpy()) -@with_seed() def test_ndarray_magic_abs(): for dim in range(1, 7): shape = rand_shape_nd(dim) @@ -218,7 +213,6 @@ def test_ndarray_magic_abs(): assert_almost_equal(abs(arr).asnumpy(), arr.abs().asnumpy()) -@with_seed() def test_ndarray_reshape(): tensor = (mx.nd.arange(30) + 1).reshape(2, 3, 5) true_res = mx.nd.arange(30) + 1 @@ -240,7 +234,6 @@ def test_ndarray_reshape(): # https://github.com/apache/incubator-mxnet/issues/18886 assertRaises(ValueError, tensor.reshape, (2, 3)) -@with_seed() def test_ndarray_flatten(): tensor = (mx.nd.arange(30) +
1).reshape(2, 3, 5) copy = tensor.flatten() @@ -253,7 +246,6 @@ def test_ndarray_flatten(): assert same(ref.asnumpy(), tensor.reshape(2, 15).asnumpy()) -@with_seed() def test_ndarray_squeeze(): def check_squeeze(shape, axis=None): data = mx.random.uniform(low=-10.0, high=10.0, shape=shape) @@ -283,7 +275,6 @@ def check_squeeze(shape, axis=None): check_squeeze((1, 1, 1, 1)) -@with_seed() def test_ndarray_expand_dims(): for ndim in range(1, 6): for axis in range(-ndim-1, ndim+1): @@ -299,7 +290,6 @@ def test_ndarray_expand_dims(): assert not same(ref.asnumpy(), out_expected) -@with_seed() def test_ndarray_choose(): shape = (100, 20) npy = np.arange(np.prod(shape)).reshape(shape) @@ -311,7 +301,6 @@ def test_ndarray_choose(): mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy()) -@with_seed() def test_ndarray_fill(): shape = (100, 20) npy = np.arange(np.prod(shape)).reshape(shape) @@ -327,7 +316,6 @@ def test_ndarray_fill(): mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy()) -@with_seed() def test_ndarray_onehot(): shape = (100, 20) npy = np.arange(np.prod(shape)).reshape(shape) @@ -348,14 +336,12 @@ def test_init_from_scalar(): assert same(npy, arr.asnumpy()) -@with_seed() def test_ndarray_copy(): c = mx.nd.array(np.random.uniform(-10, 10, (10, 10))) d = c.copyto(mx.Context('cpu', 0)) assert np.sum(np.abs(c.asnumpy() != d.asnumpy())) == 0.0 -@with_seed() def test_ndarray_scalar(): c = mx.nd.empty((10,10)) d = mx.nd.empty((10,10)) @@ -371,7 +357,6 @@ def test_ndarray_scalar(): assert(np.sum(d.asnumpy()) < 1e-5) -@with_seed() def test_ndarray_pickle(): maxdim = 5 for dim in range(1, maxdim): @@ -385,7 +370,6 @@ def test_ndarray_pickle(): assert np.sum(a.asnumpy() != a2.asnumpy()) == 0 -@with_seed() def test_ndarray_saveload(): nrepeat = 10 fname = 'tmp_list.bin' @@ -418,7 +402,6 @@ def test_ndarray_saveload(): os.remove(fname) -@with_seed() def test_ndarray_legacy_load(): data = [] for i in range(6): @@ -430,7 +413,6 @@ def test_ndarray_legacy_load(): assert same(data[i].asnumpy(), legacy_data[i].asnumpy()) -@with_seed() def test_buffer_load(): nrepeat = 10 with TemporaryDirectory(prefix='test_buffer_load_') as tmpdir: @@ -477,7 +459,6 @@ def test_buffer_load(): assertRaises(mx.base.MXNetError, mx.nd.load_frombuffer, buf_single_ndarray[:-10]) -@with_seed() @pytest.mark.serial def test_ndarray_slice(): shape = (10,) @@ -510,7 +491,6 @@ def test_ndarray_slice(): assert same(A[i, :].asnumpy(), A2[i, :]) -@with_seed() def test_ndarray_crop(): # get crop x = mx.nd.ones((2, 3, 4)) @@ -535,7 +515,6 @@ def test_ndarray_crop(): assert same(x.asnumpy(), np_x) -@with_seed() @pytest.mark.serial def test_ndarray_concatenate(): axis = 1 @@ -549,7 +528,6 @@ def test_ndarray_concatenate(): assert same(array_np, array_nd.asnumpy()) -@with_seed() def test_clip(): shape = (10,) A = mx.random.uniform(-10, 10, shape) @@ -560,7 +538,6 @@ def test_clip(): assert B1[i] <= 2 -@with_seed() def test_dot(): # Non-zero atol required, as exposed by seed 828791701 atol = 1e-5 @@ -598,7 +575,6 @@ def test_dot(): assert_almost_equal(c, C.asnumpy(), atol=atol) -@with_seed() @pytest.mark.serial def test_reduce(): sample_num = 300 @@ -675,7 +651,6 @@ def test_reduce_inner(numpy_reduce_func, nd_reduce_func, multi_axes, mx.nd.argmin, False, check_dtype=False) -@with_seed() @pytest.mark.serial def test_broadcast(): sample_num = 1000 @@ -739,7 +714,6 @@ def test_broadcast_like_axis(): test_broadcast_like_axis() -@with_seed() @pytest.mark.serial def test_broadcast_binary(): N = 100 @@ -775,7 
+749,6 @@ def check_broadcast_binary(fn): check_broadcast_binary(lambda x, y: x.astype(np.float32) == y.astype(np.float32)) -@with_seed() def test_moveaxis(): X = mx.nd.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) @@ -850,7 +823,6 @@ def test_errors(): test_errors() -@with_seed() def test_arange(): for i in range(5): start = np.random.rand() * 10 @@ -867,7 +839,6 @@ def test_arange(): assert_almost_equal(pred, gt) -@with_seed() def test_linspace(): for i in range(5): start = np.random.rand() * 100 @@ -884,7 +855,6 @@ def test_linspace(): assert_almost_equal(pred, gt) -@with_seed() @pytest.mark.serial def test_order(): ctx = default_context() @@ -1120,7 +1090,6 @@ def get_large_matrix(): assert_almost_equal(nd_ret_sort, gt) -@with_seed() def test_ndarray_equal(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1130,7 +1099,6 @@ def test_ndarray_equal(): assert (z.asnumpy() == np.ones((2, 3))).all() -@with_seed() def test_ndarray_not_equal(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1140,7 +1108,6 @@ def test_ndarray_not_equal(): assert (z.asnumpy() == np.zeros((2, 3))).all() -@with_seed() def test_ndarray_greater(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1152,7 +1119,6 @@ def test_ndarray_greater(): assert (z.asnumpy() == np.zeros((2, 3))).all() -@with_seed() def test_ndarray_greater_equal(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1166,7 +1132,6 @@ def test_ndarray_greater_equal(): assert (z.asnumpy() == np.ones((2, 3))).all() -@with_seed() def test_ndarray_lesser(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1178,7 +1143,6 @@ def test_ndarray_lesser(): assert (z.asnumpy() == np.zeros((2, 3))).all() -@with_seed() def test_ndarray_lesser_equal(): x = mx.nd.zeros((2, 3)) y = mx.nd.ones((2, 3)) @@ -1192,7 +1156,6 @@ def test_ndarray_lesser_equal(): assert (z.asnumpy() == np.ones((2, 3))).all() -@with_seed() def test_ndarray_take(): for data_ndim in range(2, 5): for idx_ndim in range(1, 4): @@ -1210,7 +1173,6 @@ def test_ndarray_take(): assert_almost_equal(result.asnumpy(), data_real[idx_real]) -@with_seed() def test_iter(): x = mx.nd.array([1, 2, 3]) y = [] @@ -1220,7 +1182,6 @@ def test_iter(): for i in range(x.size): assert same(y[i].asnumpy(), x[i].asnumpy()) -@with_seed() @pytest.mark.serial def test_cached(): sym = mx.sym.Convolution(kernel=(3, 3), num_filter=10) + 2 @@ -1264,7 +1225,6 @@ def test_cached(): o.backward() -@with_seed() def test_output(): shape = (2,2) ones = mx.nd.ones(shape) @@ -1289,7 +1249,6 @@ def test_output(): assert_almost_equal(np.eye(N, k=k), mx.nd.eye(N, k=k).asnumpy()) -@with_seed() @pytest.mark.serial def test_ndarray_fluent(): has_grad = set(['flatten', 'expand_dims', 'flip', 'tile', 'transpose', 'sum', 'nansum', 'prod', @@ -1398,7 +1357,6 @@ def test_basic_indexing_is_contiguous(): assert (y_mx[slc].asnumpy() == 0).all() -@with_seed() @pytest.mark.serial def test_ndarray_indexing(): def test_getitem(np_array, index, is_scalar=False): @@ -1693,7 +1651,6 @@ def test_assign_large_int_to_ndarray(): b = a.asnumpy() assert same(b[1,0], 16800000) -@with_seed() def test_assign_a_row_to_ndarray(): """Test case from https://github.com/apache/incubator-mxnet/issues/9976""" H, W = 10, 10 @@ -1723,7 +1680,6 @@ def test_assign_a_row_to_ndarray(): a_nd[0, :] = a_nd[1] assert same(a_np, a_nd.asnumpy()) -@with_seed() def test_ndarray_astype(): x = mx.nd.zeros((2, 3), dtype='int32') y = x.astype('float32') @@ -1756,7 +1712,6 @@ def test_ndarray_astype(): assert (id(x) == id(y)) -@with_seed() @pytest.mark.serial def 
test_norm(ctx=default_context()): try: @@ -1796,13 +1751,11 @@ def l2norm(input_data, axis=0, keepdims=False): assert_almost_equal(npy_out, mx_out) -@with_seed() def test_ndarray_cpu_shared_ctx(): ctx = mx.Context('cpu_shared', 0) res = mx.nd.zeros((1, 2, 3), ctx=ctx) assert(res.context == ctx) -@with_seed() @pytest.mark.serial def test_dlpack(): for dtype in [np.float32, np.int32]: @@ -1831,7 +1784,6 @@ def test_dlpack(): assert_almost_equal(a_np, d) assert_almost_equal(a_np, e) -@with_seed() def test_ndarray_is_inf(): random_dimensions = np.random.randint(2, 5) random_shape = [np.random.randint(2, 5) for i in range(random_dimensions)] @@ -1845,7 +1797,6 @@ def test_ndarray_is_inf(): np.testing.assert_equal(output.asnumpy(), expected_output.astype(int)) # astype since numpy functions' default return type is a boolean array instead of int -@with_seed() def test_ndarray_is_finite(): random_dimensions = np.random.randint(2, 5) random_shape = [np.random.randint(2, 5) for i in range(random_dimensions)] @@ -1859,7 +1810,6 @@ def test_ndarray_is_finite(): np.testing.assert_equal(output.asnumpy(), expected_output.astype(int)) # astype since numpy functions' default return type is a boolean array instead of int -@with_seed() def test_ndarray_is_nan(): random_dimensions = np.random.randint(2, 5) random_shape = [np.random.randint(2, 5) for i in range(random_dimensions)] @@ -1873,7 +1823,6 @@ def test_ndarray_is_nan(): np.testing.assert_equal(output.asnumpy(), expected_output.astype(int)) # astype since numpy functions' default return type is a boolean array instead of int -@with_seed() def test_ndarray_nan_comparison(): random_dimensions = np.random.randint(2, 5) random_shape = [np.random.randint(2, 5) for i in range(random_dimensions)] @@ -1933,7 +1882,6 @@ def test_zero_from_numpy(): assert False -@with_seed() def test_save_load_scalar_zero_size_ndarrays(): def check_save_load(save_is_np_shape, load_is_np_shape, shapes, save_throw_exception, load_throw_exception): with mx.np_shape(save_is_np_shape): @@ -2027,7 +1975,6 @@ def test_op(op, num_inputs, mutated_inputs, **kwargs): {'rescale_grad': 0.1, 'lr': 0.01, 'wd': 1e-3}) -@with_seed() @pytest.mark.serial def test_update_ops_mutation(): _test_update_ops_mutation_impl() @@ -2035,7 +1982,7 @@ def test_update_ops_mutation(): # Problem : # https://github.com/apache/incubator-mxnet/pull/15768#issuecomment-532046408 -@with_seed(412298777) +@pytest.mark.seed(412298777) @pytest.mark.serial def test_update_ops_mutation_failed_seed(): # The difference was -5.9604645e-08 which was
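The hunk above also shows the one case where a test keeps a fixed seed: the literal seed moves from the removed decorator into a pytest marker that the autouse function_scope_seed fixture in conftest.py can pick up. A sketch of that interplay follows; the marker and fixture names come from this patch, but the fixture body is illustrative only, not the project's actual conftest.py logic, and test_needs_fixed_seed is a hypothetical test name:

    import os
    import random

    import pytest
    import numpy as np
    import mxnet as mx

    @pytest.fixture(autouse=True)
    def function_scope_seed(request):
        marker = request.node.get_closest_marker('seed')
        if marker is not None:
            seed = marker.args[0]        # fixed seed requested by the test
        else:
            # fresh seed per test, overridable to reproduce a past failure
            seed = int(os.getenv('MXNET_TEST_SEED', random.randint(0, 2**31 - 1)))
        random.seed(seed)                # seed python, numpy and mxnet identically
        np.random.seed(seed)
        mx.random.seed(seed)
        yield

    @pytest.mark.seed(412298777)         # replaces the old @with_seed(412298777)
    def test_needs_fixed_seed():
        ...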
diff --git a/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py b/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py index 0d18dca3de52..197ae0e1cb63 --- a/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py +++ b/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py @@ -17,9 +17,7 @@ import mxnet as mx import numpy as np -import scipy.ndimage from mxnet.test_utils import * -from common import assertRaises, with_seed import shutil import tempfile import unittest @@ -63,7 +61,6 @@ def tearDown(self): print("cleanup {}".format(self.IMAGES_DIR)) shutil.rmtree(self.IMAGES_DIR) - @with_seed() @use_np def test_imageiter(self): im_list = [[np.random.randint(0, 5), x] for x in self.IMAGES] @@ -96,7 +93,6 @@ def test_imageiter(self): for batch in it: pass - @with_seed() @use_np def test_image_bbox_iter(self): im_list = [_generate_objects() + [x] for x in self.IMAGES] @@ -132,7 +128,6 @@ def test_image_bbox_iter(self): path_imglist=fname, path_root='', last_batch='keep') ] - @with_seed() @use_np def test_bbox_augmenters(self): # only test if all augmenters will work diff --git a/tests/python/unittest/test_numpy_default_dtype.py b/tests/python/unittest/test_numpy_default_dtype.py index 906298036978..4f9867a65b67 100644 --- a/tests/python/unittest/test_numpy_default_dtype.py +++ b/tests/python/unittest/test_numpy_default_dtype.py @@ -21,7 +21,6 @@ from mxnet import npx from mxnet import numpy as np from mxnet.test_utils import use_np, use_np_default_dtype -from common import with_seed class DtypeOpArgMngr(object): @@ -188,7 +187,6 @@ def check_default_dtype(op_list): check_deepnp_default_dtype(op, *workload['args'], **workload['kwargs']) -@with_seed() def test_default_float_dtype(): import platform if 'Windows' not in platform.system(): diff --git a/tests/python/unittest/test_numpy_gluon.py b/tests/python/unittest/test_numpy_gluon.py index c0147ced2b07..dcd68137cc62 100644 --- a/tests/python/unittest/test_numpy_gluon.py +++ b/tests/python/unittest/test_numpy_gluon.py @@ -26,12 +26,10 @@ from mxnet import gluon, autograd, np from mxnet.test_utils import use_np, assert_almost_equal, check_gluon_hybridize_consistency, assert_allclose from mxnet.gluon import nn -from common import with_seed import random import pytest -@with_seed() def test_create_np_param(): M, K, N = 10, 9, 20 @@ -70,7 +68,6 @@ def hybrid_forward(self, F, x, w): check_block_params(x.as_np_ndarray(), TestBlock2, True, np.ndarray, initializer) -@with_seed() @use_np def test_optimizer_with_np_ndarrays(): class LinearRegression(gluon.HybridBlock): @@ -115,7 +112,6 @@ def hybrid_forward(self, F, pred, label): trainer.step(1) -@with_seed() @use_np def test_optimizer_backward_compat(): optimizer = mx.optimizer.SGD() @@ -124,7 +120,6 @@ def test_optimizer_backward_compat(): updater(0, np.ones((0, 0)), np.zeros((0, 0))) -@with_seed() @use_np def test_np_loss_ndarray(): # Ported from test_loss.test_loss_ndarray @@ -158,7 +153,6 @@ def test_np_loss_ndarray(): assert_almost_equal(L, _np.array([1.06346405, 0.04858733]), use_broadcast=False, rtol=1e-3) -@with_seed() @use_np def test_np_get_constant(): const_arr = _np.random.uniform(0, 100, size=(10, 10)).astype(_np.float32) @@ -252,7 +246,6 @@ def hashable_index(tuple_idx): return tuple(l) -@with_seed() @use_np def test_symbolic_basic_slicing(): def random_slice_index(shape): @@ -377,7 +370,6 @@ def hybrid_forward(self, F, x): numpy_func=lambda a: a[idx]) -@with_seed() @use_np def test_net_symbol_save_load(): class Case1(gluon.HybridBlock): @@ -406,7 +398,6 @@ def hybrid_forward(self, F, x, y): mx.np.random.normal(0, 1, (10, 5, 8))]) -@with_seed() @use_np def test_hybridize_boolean_dtype(): class Foo(gluon.HybridBlock): @@ -428,7 +419,6 @@ def hybrid_forward(self, F, valid_length): assert mx.test_utils.same(out1.asnumpy(), out2.asnumpy()) -@with_seed() @use_np def test_activations_leakyrelu(): # Currently, all the activation tests, we will just test for runnable.
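As the comment notes, these activation tests only check that each layer executes end to end. A minimal sketch of that smoke-test pattern (the layer choice and input shape are arbitrary; npx.set_np() is assumed here as the standalone stand-in for the per-test @use_np decorator):

    import mxnet as mx
    from mxnet import npx
    from mxnet.gluon import nn

    npx.set_np()                                      # enable numpy semantics globally
    act_layer = nn.ELU(1.0)                           # parameter-free layer, no initialize() needed
    out = act_layer(mx.np.random.uniform(size=(10,)))
    out.asnumpy()                                     # forces computation; raises if the kernel failed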
@@ -437,7 +427,6 @@ def test_activations_leakyrelu(): out.asnumpy() -@with_seed() @use_np def test_activations_prelu(): act_layer = nn.PReLU() @@ -446,7 +435,6 @@ def test_activations_prelu(): out.asnumpy() -@with_seed() @use_np def test_activations_elu(): act_layer = nn.ELU(1.0) @@ -454,7 +442,6 @@ def test_activations_elu(): out.asnumpy() -@with_seed() @use_np def test_activations_selu(): act_layer = nn.SELU() @@ -462,7 +449,6 @@ def test_activations_selu(): out.asnumpy() -@with_seed() @use_np def test_activations_gelu(): act_layer = nn.GELU() @@ -470,7 +456,6 @@ def test_activations_gelu(): out.asnumpy() -@with_seed() @use_np def test_activations_swish(): act_layer = nn.Swish() @@ -499,7 +484,6 @@ def test_concatenate(): x2.wait_to_read() @use_np -@with_seed() def test_identity(): model = nn.Identity() x = mx.np.random.uniform(size=(128, 33, 64)) diff --git a/tests/python/unittest/test_numpy_gluon_data_vision.py b/tests/python/unittest/test_numpy_gluon_data_vision.py index 57ab8d07abbd..8b0812277b6f 100644 --- a/tests/python/unittest/test_numpy_gluon_data_vision.py +++ b/tests/python/unittest/test_numpy_gluon_data_vision.py @@ -15,10 +15,6 @@ # specific language governing permissions and limitations # under the License. -# pylint: skip-file -from __future__ import absolute_import -from __future__ import division - import os from collections import namedtuple from uuid import uuid4 @@ -26,14 +22,13 @@ import mxnet as mx from mxnet import gluon, autograd, np, npx from mxnet.test_utils import use_np, assert_almost_equal, check_gluon_hybridize_consistency, same, check_symbolic_backward -from common import assertRaises, with_seed, xfail_when_nonstandard_decimal_separator +from common import assertRaises, xfail_when_nonstandard_decimal_separator import random from mxnet.base import MXNetError from mxnet.gluon.data.vision import transforms from mxnet import image import pytest -@with_seed() @use_np def test_to_tensor(): # 3D Input @@ -63,7 +58,6 @@ def test_to_tensor(): assert same(out_nd.asnumpy(), np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1))) -@with_seed() @use_np def test_normalize(): # 3D Input @@ -98,7 +92,6 @@ def test_normalize(): assertRaises(MXNetError, normalize_transformer, invalid_data_in) -@with_seed() @use_np def test_resize(): def _test_resize_with_diff_type(dtype): @@ -136,7 +129,6 @@ def _test_resize_with_diff_type(dtype): _test_resize_with_diff_type(dtype) -@with_seed() @use_np def test_crop_resize(): def _test_crop_resize_with_diff_type(dtype): @@ -199,7 +191,6 @@ def test_crop_backward(test_nd_arr, TestCase): test_crop_backward(data_in, test_case) -@with_seed() @use_np def test_flip_left_right(): data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8) @@ -208,7 +199,6 @@ def test_flip_left_right(): assert_almost_equal(flip_in, data_trans.asnumpy()) -@with_seed() @use_np def test_flip_top_bottom(): data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8) @@ -217,7 +207,6 @@ def test_flip_top_bottom(): assert_almost_equal(flip_in, data_trans.asnumpy()) -@with_seed() @use_np def test_transformer(): from mxnet.gluon.data.vision import transforms @@ -241,21 +230,18 @@ def test_transformer(): transform(mx.np.ones((245, 480, 3), dtype='uint8')).wait_to_read() -@with_seed() @use_np def test_random_crop(): x = mx.np.ones((245, 480, 3), dtype='uint8') y = mx.npx.image.random_crop(x, width=100, height=100) assert y.shape == (100, 100, 3) -@with_seed() @use_np def test_random_resize_crop(): x = mx.np.ones((245, 480, 3), 
dtype='uint8') y = mx.npx.image.random_resized_crop(x, width=100, height=100) assert y.shape == (100, 100, 3) -@with_seed() @use_np def test_hybrid_transformer(): from mxnet.gluon.data.vision import transforms @@ -278,7 +264,6 @@ def test_hybrid_transformer(): transform(mx.np.ones((245, 480, 3), dtype='uint8')).wait_to_read() @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np def test_rotate(): transformer = transforms.Rotate(10.) @@ -314,7 +299,6 @@ def test_rotate(): assert_almost_equal(ans.asnumpy(), expected_result.asnumpy(), atol=1e-6) -@with_seed() @use_np def test_random_rotation(): # test exceptions for probability input outside of [0,1] @@ -335,7 +319,6 @@ def test_random_rotation(): assert_almost_equal(data.asnumpy(), transformer(data).asnumpy()) -@with_seed() @use_np def test_random_transforms(): from mxnet.gluon.data.vision import transforms @@ -356,7 +339,6 @@ def transform_fn(x): assert counter == pytest.approx(5000, 1e-1) @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np def test_random_gray(): from mxnet.gluon.data.vision import transforms @@ -384,7 +366,6 @@ def test_random_gray(): num_apply += 1 assert_almost_equal(num_apply/float(iteration), 0.5, 0.1) -@with_seed() @use_np def test_bbox_random_flip(): from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxRandomFlipLeftRight @@ -401,7 +382,6 @@ def test_bbox_random_flip(): num_apply += 1 assert_almost_equal(np.array([num_apply])/float(iteration), 0.5, 0.5) -@with_seed() @use_np def test_bbox_crop(): from mxnet.gluon.contrib.data.vision.transforms.bbox import ImageBboxCrop diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 07fe364d937f..8bfb0039e58a 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -29,7 +29,7 @@ from mxnet.test_utils import assert_almost_equal from mxnet.test_utils import use_np from mxnet.test_utils import is_op_runnable -from common import assertRaises, with_seed, random_seed +from common import assertRaises, random_seed from mxnet.numpy_dispatch_protocol import with_array_function_protocol, with_array_ufunc_protocol from mxnet.numpy_dispatch_protocol import _NUMPY_ARRAY_FUNCTION_LIST, _NUMPY_ARRAY_UFUNC_LIST @@ -3308,7 +3308,6 @@ def check_interoperability(op_list): _check_interoperability_helper(name, rel_tol, abs_tol, *workload['args'], **workload['kwargs']) -@with_seed() @use_np @with_array_function_protocol @pytest.mark.serial @@ -3322,7 +3321,6 @@ def test_np_memory_array_function(): assert op(data_mx, np.ones((5, 0))) == op(data_np, _np.ones((5, 0))) -@with_seed() @use_np @with_array_function_protocol @pytest.mark.serial @@ -3330,7 +3328,6 @@ def test_np_array_function_protocol(): check_interoperability(_NUMPY_ARRAY_FUNCTION_LIST) -@with_seed() @use_np @with_array_ufunc_protocol @pytest.mark.serial @@ -3338,7 +3335,6 @@ def test_np_array_ufunc_protocol(): check_interoperability(_NUMPY_ARRAY_UFUNC_LIST) -@with_seed() @use_np @pytest.mark.serial def test_np_fallback_ops(): diff --git a/tests/python/unittest/test_numpy_loss.py b/tests/python/unittest/test_numpy_loss.py index 609c6f00a1bc..26b0bc7dfced 100644 --- a/tests/python/unittest/test_numpy_loss.py +++ b/tests/python/unittest/test_numpy_loss.py @@ -19,12 +19,11 @@ import numpy as np from mxnet import gluon, autograd from mxnet.test_utils import assert_almost_equal, default_context, use_np -from common import with_seed, 
xfail_when_nonstandard_decimal_separator +from common import xfail_when_nonstandard_decimal_separator import pytest @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np @pytest.mark.parametrize("hybridize", [False, True]) def test_loss_np_ndarray(hybridize): @@ -127,7 +126,6 @@ def test_loss_np_ndarray(hybridize): assert_almost_equal(L, np.array([ 1.06346405, 0.04858733]), rtol=1e-3, atol=1e-4) -@with_seed() @use_np @pytest.mark.parametrize("hybridize", [False, True]) def test_bce_equal_ce2(hybridize): @@ -163,7 +161,6 @@ def test_logistic_loss_equal_bce(hybridize): assert_almost_equal(loss_signed(data, 2 * label - 1), loss_bce(data, label), atol=1e-6) -@with_seed() @use_np @pytest.mark.parametrize("hybridize", [False, True]) def test_ctc_loss(hybridize): @@ -205,7 +202,6 @@ def test_ctc_loss(hybridize): @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np def test_sdml_loss(): @@ -240,7 +236,6 @@ def test_sdml_loss(): assert(avg_loss < 0.05) -@with_seed() @use_np @pytest.mark.parametrize("hybridize", [False, True]) def test_cosine_loss(hybridize): diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py index 74f6af33d4ad..4ce4f75463fb 100644 --- a/tests/python/unittest/test_numpy_ndarray.py +++ b/tests/python/unittest/test_numpy_ndarray.py @@ -26,13 +26,12 @@ from mxnet import np, npx, autograd from mxnet.gluon import HybridBlock from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, use_np -from common import with_seed, retry, TemporaryDirectory, xfail_when_nonstandard_decimal_separator +from common import retry, TemporaryDirectory, xfail_when_nonstandard_decimal_separator from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like from mxnet.ndarray.ndarray import py_slice from mxnet.base import integer_types -@with_seed() @use_np def test_np_empty(): # (input dtype, expected output dtype) @@ -78,7 +77,6 @@ def test_np_empty(): assert_exception(np.empty, NotImplementedError, shape, dtype, order, ctx) -@with_seed() @use_np def test_np_array_creation(): dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_, @@ -105,7 +103,6 @@ def test_np_array_creation(): assert same(mx_arr.asnumpy(), np_arr) -@with_seed() @use_np @pytest.mark.serial def test_np_zeros(): @@ -160,7 +157,6 @@ def check_zero_array_creation(shape, dtype): check_zero_array_creation(shape, dtype) -@with_seed() @use_np def test_np_ones(): # test np.ones in Gluon @@ -214,7 +210,6 @@ def check_ones_array_creation(shape, dtype): check_ones_array_creation(shape, dtype) -@with_seed() @use_np @pytest.mark.serial def test_identity(): @@ -261,7 +256,6 @@ def check_identity_array_creation(shape, dtype): @xfail_when_nonstandard_decimal_separator -@with_seed() @pytest.mark.serial def test_np_ndarray_binary_element_wise_ops(): np_op_map = { @@ -516,7 +510,6 @@ def check_binary_op_result(shape1, shape2, op, dtype=None): check_binary_op_result(None, (0, 2), op, dtype) -@with_seed() @pytest.mark.serial def test_np_hybrid_block_multiple_outputs(): @use_np @@ -550,7 +543,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_exception(net, TypeError, data_np) -@with_seed() @use_np def test_np_grad_ndarray_type(): data = np.array(2, dtype=_np.float32) @@ -559,7 +551,6 @@ def test_np_grad_ndarray_type(): assert type(data.detach()) == np.ndarray -@with_seed() @use_np @pytest.mark.serial def test_np_ndarray_astype(): @@ -597,7 +588,6 @@ def 
check_astype_equal(itype, otype, copy, expect_zero_copy=False, hybridize=Fal check_astype_equal(itype, otype, copy, hybridize) -@with_seed() def test_np_ndarray_copy(): mx_data = np.array([2, 3, 4, 5], dtype=_np.int32) assert_exception(mx_data.copy, NotImplementedError, order='F') @@ -606,7 +596,6 @@ def test_np_ndarray_copy(): assert same(mx_ret.asnumpy(), np_ret) -@with_seed() def test_formatting(): def test_0d(): a = np.array(np.pi) @@ -634,7 +623,6 @@ def test_nd_no_format(): # for exmpale, if a = np.array([np.pi]), the return value of '{}'.format(a) is '[3.1415927] @gpu(0)' -@with_seed() @use_np @pytest.mark.serial def test_np_ndarray_indexing(): @@ -1011,7 +999,6 @@ def test_setitem_autograd(np_array, index): test_getitem_slice_bound() -@with_seed() @use_np @pytest.mark.serial def test_np_save_load_ndarrays(): @@ -1056,7 +1043,6 @@ def test_np_save_load_ndarrays(): @retry(5) -@with_seed() @use_np @pytest.mark.serial def test_np_multinomial(): @@ -1130,7 +1116,6 @@ def test_np_multinomial(): mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1143,7 +1128,6 @@ def test_boolean_index_single(): assert same(a[np.array(True, dtype=np.bool_)].asnumpy(), a[None].asnumpy()) assert same(a[np.array(False, dtype=np.bool_)].asnumpy(), a[None][0:0].asnumpy()) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1160,7 +1144,6 @@ def test_boolean_index_catch_exception(): index = np.zeros((4, 4), dtype=bool) assert_exception(arr.__getitem__, IndexError, index) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1172,7 +1155,6 @@ def test_boolean_index_onedim(): b = np.array([True], dtype=bool) assert same(a[b].asnumpy(), a.asnumpy()) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1190,7 +1172,6 @@ def test_boolean_index_twodim(): assert same(a[b[1]].asnumpy(), _np.array([[4, 5, 6]], dtype=a.dtype)) assert same(a[b[0]].asnumpy(), a[b[2]].asnumpy()) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1202,7 +1183,6 @@ def test_boolean_index_list(): assert same(a[b].asnumpy(), _np.array([1, 3], dtype=a.dtype)) (a[None, b], [[1, 3]]) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1240,7 +1220,6 @@ def test_boolean_index_tuple(): _np_b = b.asnumpy() assert same(a[b > 1].asnumpy(), _np_a[_np_b > 1]) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1286,7 +1265,6 @@ def 
test_boolean_index_assign(): mx_data[False, 1] = 8 assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @pytest.mark.skipif(not is_op_runnable(), reason="Comparison ops can only run on either CPU instances, or GPU instances with" " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON") @use_np @@ -1306,7 +1284,6 @@ def test_boolean_index_autograd(): assert_almost_equal(a.grad.asnumpy(), a_grad_np, rtol=1e-4, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_get_dtype(): dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_, diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index b96b335c2e38..4838a9f0e585 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -33,7 +33,7 @@ from mxnet.test_utils import check_numeric_gradient, use_np, collapse_sum_like, effective_dtype from mxnet.test_utils import new_matrix_with_real_eigvals_nd from mxnet.test_utils import new_sym_matrix_with_real_eigvals_nd -from common import assertRaises, with_seed, retry, xfail_when_nonstandard_decimal_separator +from common import assertRaises, retry, xfail_when_nonstandard_decimal_separator import random from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf from mxnet.numpy_op_signature import _get_builtin_op @@ -41,7 +41,6 @@ from mxnet.operator import get_all_registered_operators -@with_seed() @use_np @pytest.mark.parametrize('hybridize', [True, False]) @pytest.mark.parametrize('dtype', [_np.float32, _np.float64]) @@ -208,7 +207,6 @@ def tensordot_backward(out_grad, a, b, axes=2): assert_almost_equal(b.grad.asnumpy(), gt_in_grad[1], rtol=1e-2, atol=1e-2) -@with_seed() @use_np @pytest.mark.parametrize('shape_a,shape_b', [ ((3, 0), (0, 4)), @@ -244,7 +242,6 @@ def test_np_dot(shape_a, shape_b): check_numeric_gradient(mx_sym, {"a": a, "b": b}, numeric_eps=eps, rtol=1e-2, atol=1e-3) -@with_seed() @use_np @pytest.mark.parametrize('shape_a,shape_b', [ ((4, 5), (2, 3)), @@ -257,7 +254,6 @@ def test_np_dot_error(shape_a, shape_b): mx_res = np.dot(a.as_np_ndarray(), b.as_np_ndarray()) -@with_seed() @use_np @pytest.mark.parametrize('shape', [(), (5,), (3, 3)]) @pytest.mark.parametrize('hybridize', [True, False]) @@ -305,7 +301,6 @@ def vdot_backward(a, b): rtol=1e-1, atol=1e-1, dtype=dtype) -@with_seed() @use_np @pytest.mark.parametrize('a_shape,b_shape', [ ((3,), (3,)), @@ -398,7 +393,6 @@ def inner_backward(a, b): rtol=1e-1, atol=1e-1, dtype=dtype) -@with_seed() @use_np @pytest.mark.parametrize('a_shape,b_shape', [ ((3,), (3,)), @@ -443,7 +437,6 @@ def hybrid_forward(self, F, a, b): rtol=1e-1, atol=1e-1, dtype=dtype) -@with_seed() @use_np @pytest.mark.parametrize('shape_a,shape_b', [ ((3,), (3,)), @@ -578,7 +571,6 @@ def test_np_matmul_error(shape_a, shape_b): np.matmul(a, b) -@with_seed() @use_np @pytest.mark.parametrize('a_shape,b_shape', [ ((3,), (3,)), @@ -656,7 +648,6 @@ def hybrid_forward(self, F, a, b): assert_almost_equal(b.grad.asnumpy(), np_backward[1], rtol=1e-2, atol=1e-2) -@with_seed() @use_np @pytest.mark.parametrize('shape', [rand_shape_nd(4, dim=4), (4, 0, 4, 0)]) @pytest.mark.parametrize('axis', [0, 1, 2, 3, (), None]) @@ -755,7 +746,6 @@ def is_int(dtype): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @use_np @pytest.mark.parametrize('bool_agg', ['all', 'any']) @pytest.mark.parametrize('shape', [ @@ -794,7 +784,6 @@ def 
hybrid_forward(self, F, a): assert_almost_equal(mx_outs.asnumpy(), np_outs) -@with_seed() @use_np @pytest.mark.parametrize('func', ['max', 'min']) @pytest.mark.parametrize('in_data_dim', [2, 3, 4]) @@ -886,7 +875,6 @@ def _test_np_exception(func, shape, dim): _test_np_exception(func, shape, dim) -@with_seed() @use_np @pytest.mark.parametrize('a_shape,w_shape,axes', [ ((3, 5), (3, 5), None), @@ -999,7 +987,6 @@ def avg_backward(a, w, avg, axes, init_a_grad=None, init_w_grad=None): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_mean(): class TestMean(HybridBlock): @@ -1094,7 +1081,6 @@ def is_int(dtype): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_moment(): class TestMoment(HybridBlock): @@ -1159,7 +1145,6 @@ def legalize_shape(shape): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol, use_broadcast=False, equal_nan=True) -@with_seed() @use_np def test_np_shape(): shapes = [ @@ -1179,7 +1164,6 @@ def test_np_shape(): assert mx_shape == np_shape -@with_seed() @use_np @pytest.mark.parametrize('config', [ (0.0, 1.0, 10), @@ -1260,7 +1244,6 @@ def test_np_linspace_arange(): assert_almost_equal(mx.np.linspace(0, test_index, test_index + 1).asnumpy(), _np.arange(test_index + 1)) -@with_seed() @use_np @pytest.mark.parametrize('config', [ (0.0, 1.0, 20), @@ -1306,7 +1289,6 @@ def hybrid_forward(self, F, x): assert mx_out.dtype == np_out.dtype -@with_seed() @use_np @pytest.mark.parametrize('start,end,step', [ ([], [], None), @@ -1360,7 +1342,6 @@ def hybrid_forward(self, F, a): expected_grad[basic_index] = 1 assert same(a.grad.asnumpy(), expected_grad) -@with_seed() @use_np def test_npx_index_add(): class TestIndexAdd(HybridBlock): @@ -1514,7 +1495,6 @@ def index_add_bwd(out_grad, a_grad, ind, val_grad, ind_ndim, ind_num, grad_req_a assert_almost_equal(mx_out.asnumpy(), expected_ret, rtol=eps, atol=eps) -@with_seed() @use_np def test_npx_index_update(): class TestIndexUpdate(HybridBlock): @@ -1676,7 +1656,6 @@ def index_update_bwd(out_grad, a_grad, ind, val_grad, ind_ndim, ind_num, grad_re check_index_update_forward(mx_out.asnumpy(), a.asnumpy(), ind.astype(indtype), val.asnumpy(), ind_ndim, ind_num, eps) -@with_seed() @use_np def test_npx_batch_dot(): ctx = mx.context.current_context() @@ -1795,7 +1774,6 @@ def gt_grad_batch_dot_numpy(lhs, rhs, ograd, transpose_a, transpose_b, lhs_req, transpose_b=transpose_b)) -@with_seed() @use_np @pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4), (4, 6, 4, 5), (4, 5, 6, 4, 5)]) @@ -1951,7 +1929,6 @@ def _test_batchnorm_impl(axis, _test_batchnorm_impl(axis, data_grad_req, gamma_grad_req, beta_grad_req) -@with_seed() @use_np def test_npx_softmax(): class TestSoftmax(HybridBlock): @@ -2009,7 +1986,6 @@ def np_log_softmax(x, axis=-1): assert_almost_equal(mx_a.grad.asnumpy(), _np.zeros(shape), rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_npi_boolean_assign(): class TestBooleanAssignScalar(HybridBlock): @@ -2097,7 +2073,6 @@ def hybrid_forward(self, F, a, mask, value): assert_almost_equal(mx_data2.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_reshape(): class TestReshape(HybridBlock): @@ -2131,7 +2106,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_argsort(): class TestArgsort(HybridBlock): @@ -2166,7 +2140,6 @@ def hybrid_forward(self, F, x): 
assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-5, atol=1e-6, use_broadcast=False) -@with_seed() @use_np @pytest.mark.parametrize('kind', ['quicksort', 'mergesort', 'heapsort']) @pytest.mark.parametrize('shape', [ @@ -2219,7 +2192,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(ret.asnumpy(), expected_ret, atol=1e-5, rtol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_squeeze(): config = [((), None), @@ -2259,7 +2231,6 @@ def hybrid_forward(self, F, x): @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np def test_np_tri(): class TestTri(HybridBlock): @@ -2293,7 +2264,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-5, atol=1e-6, use_broadcast=False) -@with_seed() @use_np def test_np_prod(): class TestProd(HybridBlock): @@ -2345,7 +2315,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_flatten(): class TestFlatten(HybridBlock): @@ -2370,7 +2339,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(a_mx.grad.asnumpy(), _np.ones_like(a_np), rtol=1e-5, atol=1e-6, use_broadcast=False) -@with_seed() @use_np @pytest.mark.parametrize('src_shape,dst_shape', [ ((), (1, 2, 4, 5)), @@ -2423,7 +2391,6 @@ def hybrid_forward(self, F, x): ret = test_scalar_broadcast_to(np.empty(())) assert_almost_equal(ret.asnumpy(), expected_ret, rtol=1e-5, atol=1e-6, use_broadcast=False) -@with_seed() @use_np @pytest.mark.parametrize('src_shape,npx_dst_shape,np_dst_shape', [ ((5,), (3, 4, -2), (3, 4, 5)), @@ -2467,7 +2434,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(a_mx.grad.asnumpy(), expected_grad, rtol=1e-5, atol=1e-6, use_broadcast=False) -@with_seed() @use_np @pytest.mark.parametrize('hybridize', [True, False]) @pytest.mark.parametrize('dtype', [_np.float32, _np.float16, _np.int32]) @@ -2546,7 +2512,6 @@ def test_np_transpose_error(): pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3))) -@with_seed() @use_np def test_np_meshgrid(): nx, ny = (4, 5) @@ -2560,7 +2525,6 @@ def test_np_meshgrid(): assert same(zv.asnumpy(), zv_expected) -@with_seed() @use_np @pytest.mark.parametrize('shapes', [ [(), (2, 1), (1, 3), (4, 1, 1), (5, 4, 2, 3)], @@ -2575,7 +2539,6 @@ def test_np_broadcast_arrays(shapes): assert same(expected_ret, ret.asnumpy()) -@with_seed() @use_np def test_np_tile(): config = [ @@ -2616,7 +2579,6 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) -@with_seed() @use_np def test_np_tril(): # numpy tril does not support scalar array (zero-dim) @@ -2677,7 +2639,6 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) -@with_seed() @use_np def test_np_triu(): # numpy triu does not support scalar array (zero-dim) @@ -2738,7 +2699,6 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) -@with_seed() @use_np def test_np_unary_funcs(): def check_unary_func(func, ref_grad, shape, low, high): @@ -2822,7 +2782,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(y.asnumpy(), -np_test_data) -@with_seed() @use_np @retry(3) @pytest.mark.parametrize('func,ref_grad,low,high', [ @@ -2915,7 +2874,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assertRaises(NotImplementedError, getattr(np, func), mx_test_data, order='mxnet') -@with_seed() @use_np @pytest.mark.parametrize('ndim', [2, 3, 4]) @pytest.mark.parametrize('func,low,high', [ @@ -2966,7 +2924,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): 
check_unary_func(func, shape, low, high) -@with_seed() @use_np def test_np_binary_funcs(): def check_binary_func(func, lshape, rshape, low, high, lgrads, rgrads=None, alltypes=None): @@ -3107,7 +3064,6 @@ def hybrid_forward(self, F, a, b, *args, **kwargs): check_binary_func(func, lshape, rshape, low, high, lgrads, rgrads, dtypes) -@with_seed() @use_np def test_np_mixed_precision_binary_funcs(): itypes = [np.bool, np.int8, np.int32, np.int64] @@ -3239,7 +3195,6 @@ def hybrid_forward(self, F, a, b, *args, **kwargs): continue check_mixed_precision_binary_func(func, low, high, lshape, rshape, lgrad, rgrad, type1, type2) -@with_seed() @use_np def test_np_mixed_mxnp_op_funcs(): # generate onp & mx_np in same type @@ -3280,7 +3235,6 @@ def test_np_mixed_mxnp_op_funcs(): out = onp / mx_np assert isinstance(out, mx.np.ndarray) -@with_seed() @use_np def test_np_binary_scalar_funcs(): itypes = [np.int8, np.int32, np.int64] @@ -3341,7 +3295,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): check_binary_scalar_func(func, low, high, shape, lgrad, ltype, is_int, hybridize) -@with_seed() @use_np def test_np_boolean_binary_funcs(): def check_boolean_binary_func(func, mx_x1, mx_x2): @@ -3395,7 +3348,6 @@ def hybrid_forward(self, F, a, b, *args, **kwargs): check_boolean_binary_func(func, x1, x2) -@with_seed() @use_np def test_npx_relu(): def np_relu(x): @@ -3432,7 +3384,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_npx_sigmoid(): def np_sigmoid(x): @@ -3469,7 +3420,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_atleast_nd(): class TestAtleastND(HybridBlock): @@ -3523,7 +3473,6 @@ def hybrid_forward(self, F, *arys): same(mx_out[i].asnumpy(), np_out[i]) -@with_seed() @use_np def test_np_arange(): configs = [ @@ -3586,7 +3535,6 @@ def hybrid_forward(self, F, x): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_np_insert(): class TestInsert(HybridBlock): @@ -3699,7 +3647,6 @@ def GetNdim(tp): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_split(): class TestSplit(HybridBlock): @@ -3752,7 +3699,6 @@ def get_indices(axis_size): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_array_split(): class TestArray_split(HybridBlock): @@ -3809,7 +3755,6 @@ def get_indices(axis_size): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_vsplit(): class TestVsplit(HybridBlock): @@ -3864,7 +3809,6 @@ def get_indices(axis_size): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_concat(): class TestConcat(HybridBlock): @@ -3933,7 +3877,6 @@ def get_new_shape(shape, axis): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_append(): class TestAppend(HybridBlock): @@ -3989,7 +3932,6 @@ def get_new_shape(shape, axis): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_stack(): class TestStack(HybridBlock): @@ -4039,7 +3981,6 @@ def hybrid_forward(self, F, a, *args): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_np_hstack(): class TestHStack(HybridBlock): @@ -4103,7 +4044,6 @@ def get_new_shape(shape): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def 
test_np_dstack(): class TestDStack(HybridBlock): @@ -4164,7 +4104,6 @@ def get_new_shape(shape): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_ravel(): class TestRavel(HybridBlock): @@ -4197,7 +4136,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_randint(): ctx = mx.context.current_context() @@ -4243,7 +4181,6 @@ def test_np_randint(): verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, nrepeat=100) -@with_seed() @use_np def test_np_swapaxes(): config = [((0, 1, 2), 0, 0), @@ -4277,7 +4214,6 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) -@with_seed() @use_np @pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600') def test_np_delete(): @@ -4353,7 +4289,6 @@ def GetDimSize(shp, axis): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_argmin_argmax(): workloads = [ @@ -4424,7 +4359,6 @@ def hybrid_forward(self, F, x): assert same(mx_ret.asnumpy(), np_ret) -@with_seed() @use_np def test_np_argmin_argmax_large_tensor(): # compare inp[arg] with ext directly because along one axis there might @@ -4441,7 +4375,6 @@ def single_run(dtype): single_run(d) -@with_seed() @use_np def test_np_clip(): workloads = [ @@ -4513,7 +4446,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_ret.asnumpy(), np_ret, atol=1e-4, rtol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_npx_random_bernoulli(): def _test_bernoulli_exception(prob, logit): @@ -4548,7 +4480,6 @@ def _test_bernoulli_exception(prob, logit): assertRaises(ValueError, _test_bernoulli_exception, scaled_prob, None) -@with_seed() @use_np def test_npx_constraint_check(): msg = "condition violated" @@ -4589,7 +4520,6 @@ def executor(boolean_tensor): assert (input_tensor.asnumpy() == out.asnumpy()).all() -@with_seed() @use_np def test_npx_special_unary_func(): def check_unary_func(func, ref_grad, shape, low, high): @@ -4644,7 +4574,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): @xfail_when_nonstandard_decimal_separator -@with_seed() @use_np def test_np_random_grad(): class TestRandomGrad(HybridBlock): @@ -4695,7 +4624,6 @@ def hybrid_forward(self, F, loc, scale): assert mx_out.asnumpy().shape == np_out.shape -@with_seed() @use_np def test_np_lognormal_grad(): class TestLognormalGrad(HybridBlock): @@ -4745,7 +4673,6 @@ def _test_lognormal_exception(sigma): assertRaises(ValueError, _test_lognormal_exception, -1) -@with_seed() @use_np def test_npx_sample_n(): def shape_formatter(s): @@ -4783,7 +4710,6 @@ def hybrid_forward(self, F, param1, param2): assert out.shape == expected_shape -@with_seed() @use_np def test_np_random(): shapes = [(), (1,), (2, 3), (4, 0, 5), 6, (7, 8), None] @@ -4835,7 +4761,6 @@ def hybrid_forward(self, F, x): assert out.shape == expected_shape -@with_seed() @use_np def test_gamma_exception(): def _test_gamma_exception(shape, scale): @@ -4861,7 +4786,6 @@ def _test_gamma_exception(shape, scale): assertRaises(ValueError, _test_gamma_exception, shape, scale) -@with_seed() @use_np @pytest.mark.parametrize("shape", [(1,), (2, 2), (4, 2, 2)]) @pytest.mark.parametrize("a", [2.0, 5.0, 10.0]) @@ -4901,7 +4825,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(expected_grad, param.grad.asnumpy(), rtol=1e-2, atol=1e-3) -@with_seed() @use_np @pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600') def 
test_np_random_beta(): @@ -4946,7 +4869,6 @@ def _test_random_beta_range(output): assert _test_random_beta_range(mx_out_imperative.asnumpy()) == True -@with_seed() @use_np @pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600') def test_np_random_f(): @@ -4978,7 +4900,6 @@ def hybrid_forward(self, F, dfnum, dfden): assert_almost_equal(np_out.shape, mx_out_imperative.shape) -@with_seed() @use_np @pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600') def test_np_random_chisquare(): @@ -5014,7 +4935,6 @@ def hybrid_forward(self, F, df): assert_almost_equal(np_out.shape, mx_out_imperative.shape) -@with_seed() @use_np def test_np_random_rayleigh(): class TestRayleigh(HybridBlock): @@ -5052,7 +4972,6 @@ def _test_rayleigh_exception(scale): assertRaises(ValueError, _test_rayleigh_exception, -1) -@with_seed() @use_np def test_np_exponential(): class TestRandomExp(HybridBlock): @@ -5088,7 +5007,6 @@ def _test_exponential_exception(scale): assertRaises(ValueError, _test_exponential_exception, -1) -@with_seed() @use_np def test_np_random_a(): op_names = ['pareto', 'power', 'weibull'] @@ -5157,7 +5075,6 @@ def _test_exception(a): assertRaises(ValueError, _test_exception, 0) -@with_seed() @use_np def test_np_weibull_grad(): class TestRandomW(HybridBlock): @@ -5190,7 +5107,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(a.grad.asnumpy().sum(), formula_grad.asnumpy().sum(), rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_pareto_grad(): class TestRandomP(HybridBlock): @@ -5224,7 +5140,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(a.grad.asnumpy().sum(), formula_grad.asnumpy().sum(), rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_randn(): # Test shapes. @@ -5246,7 +5161,6 @@ def test_np_randn(): assert data_mx.shape == shape -@with_seed() @use_np @pytest.mark.skip(reason='Test hangs. Tracked in #18144') def test_np_multivariate_normal(): @@ -5293,7 +5207,6 @@ def hybrid_forward(self, F, mean, cov): assert list(desired_shape) == list(actual_shape) -@with_seed() @use_np def test_npx_categorical(): class TestNumpyCategorical(HybridBlock): @@ -5320,7 +5233,6 @@ def hybrid_forward(self, F, prob): assert mx_out.shape == desired_shape -@with_seed() @use_np def test_random_seed(): for seed in [234, 594, 7240, 20394]: @@ -5331,7 +5243,6 @@ def test_random_seed(): assert_almost_equal(ret[0].asnumpy(), ret[1].asnumpy(), rtol=1e-4, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_cumsum(): def np_cumsum_backward(ograd, axis=None, dtype=None): @@ -5382,7 +5293,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np @pytest.mark.skip(reason='Skipped as the test is flaky and the feature causes curand error. Tracked in #18100') def test_np_histogram(): @@ -5400,7 +5310,6 @@ def test_np_histogram(): assert_almost_equal(mx_bins.asnumpy(), np_bins, rtol=1e-3, atol=1e-5) -@with_seed() @use_np @pytest.mark.skip(reason='Skipped as the test is flaky and the feature causes curand error. 
Tracked in #18100') def test_np_choice(): @@ -5499,7 +5408,6 @@ def test_indexing_mode(sampler, set_size, samples_size, replace, weight=None): test_indexing_mode(test_choice_weighted, num_classes, num_classes // 2, replace, weight) -@with_seed() @use_np def test_np_eye(): configs = [ @@ -5567,7 +5475,6 @@ def hybrid_forward(self, F, x): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_np_indices(): dtypes = ['int32', 'int64', 'float16', 'float32', 'float64'] @@ -5612,7 +5519,6 @@ def hybrid_forward(self, F, x): assert mx_out.shape == np_out.shape -@with_seed() @use_np def test_np_repeat(): config = [ @@ -5650,7 +5556,6 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) -@with_seed() @use_np def test_np_linalg_norm(): class TestLinalgNorm(HybridBlock): @@ -5769,7 +5674,6 @@ def spectral_norm_grad(data): assert_almost_equal(mx_ret.asnumpy(), np_ret, rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.parametrize('shape', [ (3, 3), @@ -5875,7 +5779,6 @@ def check_svd(UT, L, V, data_np): check_svd(UT, L, V, data_np) -@with_seed() @use_np def test_np_linalg_qr(): class TestQR(HybridBlock): @@ -6015,7 +5918,6 @@ def check_qr(q, r, a_np): check_qr(Q, R, data_np) -@with_seed() @use_np def test_np_linalg_cholesky(): class TestCholesky(HybridBlock): @@ -6136,7 +6038,6 @@ def newSymmetricPositiveDefineMatrix_nD(shape, ran=(0., 10.), max_cond=4): check_cholesky(L, data_np) -@with_seed() @use_np @pytest.mark.parametrize('hybridize', [True, False]) @pytest.mark.parametrize('dtype', ['float32', 'float64']) @@ -6223,7 +6124,6 @@ def check_inv(A_inv, data_np): check_inv(A_inv, data_np) -@with_seed() @use_np def test_np_linalg_solve(): class TestSolve(HybridBlock): @@ -6420,7 +6320,6 @@ def get_grad_A(A, ind): check_tensorinv(mx_out, a, ind) -@with_seed() @use_np def test_np_linalg_tensorsolve(): class TestTensorsolve(HybridBlock): @@ -6572,7 +6471,6 @@ def newInvertibleMatrix_2D(shape, max_cond=4): check_tensorsolve(mx_out, a.asnumpy(), b.asnumpy(), axes) -@with_seed() @use_np def test_np_linalg_lstsq(): class TestLstsq(HybridBlock): @@ -6648,7 +6546,6 @@ def check_lstsq(a_np, b_np, rcond_np, x, residuals, rank, s): check_lstsq(a_np, b_np, rcond, x, residuals, rank, s) -@with_seed() @use_np def test_np_linalg_matrix_rank(): class TestMatrixRank(HybridBlock): @@ -6725,7 +6622,6 @@ def check_matrix_rank(rank, a_np, tol, hermitian): check_matrix_rank(rank, a.asnumpy(), tol.asnumpy(), hermitian=False) -@with_seed() @use_np def test_np_linalg_pinv(): class TestPinv(HybridBlock): @@ -6804,7 +6700,6 @@ def check_pinv(x, a_np, rcond_np, hermitian, use_rcond): check_pinv(mx_out, a.asnumpy(), rcond.asnumpy(), hermitian, use_rcond) -@with_seed() @use_np def test_np_linalg_eigvals(): class TestEigvals(HybridBlock): @@ -6872,7 +6767,6 @@ def check_eigvals(x, a_np): check_eigvals(mx_out, a.asnumpy()) -@with_seed() @use_np def test_np_linalg_eigvalsh(): class TestEigvalsh(HybridBlock): @@ -6949,7 +6843,6 @@ def new_matrix_from_sym_matrix_nd(sym_a, UPLO): check_eigvalsh(mx_out, a.asnumpy(), UPLO) -@with_seed() @use_np def test_np_linalg_eig(): class TestEig(HybridBlock): @@ -7029,7 +6922,6 @@ def check_eig(w, v, a_np): check_eig(mx_w, mx_v, a.asnumpy()) -@with_seed() @use_np def test_np_linalg_eigh(): class TestEigh(HybridBlock): @@ -7135,7 +7027,6 @@ def new_matrix_from_sym_matrix_nd(sym_a, UPLO): check_eigh(w, v, a.asnumpy(), UPLO) -@with_seed() @use_np def test_np_linalg_det(): class TestDet(HybridBlock): @@ -7185,7 +7076,6 @@ def hybrid_forward(self, F, a): 
check_numeric_gradient(mx_sym, [a.as_nd_ndarray()], rtol=1e-1, atol=1e-1, dtype=dtype) -@with_seed() @use_np @retry(3) @pytest.mark.parametrize('grad_req', ['write', 'add', 'null']) @@ -7232,7 +7122,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out[1].asnumpy(), np_out[1], rtol=1e-1, atol=1e-1) -@with_seed() @use_np def test_np_vstack(): class TestVstack(HybridBlock): @@ -7288,7 +7177,6 @@ def g(data): assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_full(): class TestFull(HybridBlock): @@ -7339,7 +7227,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.skip(reason='Skipped as the test is flaky and the feature causes curand error. Tracked in #18100') def test_np_full_like(): @@ -7387,7 +7274,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(ret.asnumpy(), expected_ret, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_roll(): class TestRoll(HybridBlock): @@ -7445,7 +7331,6 @@ def hybrid_forward(self, F, x): numeric_eps=1e-3, rtol=1e-3, atol=1e-5, dtype=i_dtype[dtype]) -@with_seed() @use_np def test_np_trace(): class TestTrace(HybridBlock): @@ -7527,7 +7412,6 @@ def g(data, axis1, axis2, offset): assert False -@with_seed() @use_np def test_np_windows(): class TestWindows(HybridBlock): @@ -7562,7 +7446,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_flip(): class TestFlip(HybridBlock): @@ -7602,7 +7485,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_flipud_fliplr(): class TestFlipud(HybridBlock): @@ -7656,7 +7538,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_around(): class TestAround(HybridBlock): @@ -7688,7 +7569,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_flatnonzero(): class TestFlatnonzero(HybridBlock): @@ -7718,7 +7598,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_round(): class TestRound(HybridBlock): @@ -7751,7 +7630,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_nonzero(): class TestNonzero(HybridBlock): @@ -7783,7 +7661,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(mx_out.asnumpy(), np_out, rtol, atol) -@with_seed() @use_np def test_np_unique(): class TestUnique(HybridBlock): @@ -7843,7 +7720,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out[i].asnumpy(), np_out[i], rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_take(): configs = [ @@ -7949,7 +7825,6 @@ def check_output_n_grad(data_shape, idx_shape, axis, mode): check_output_n_grad(config[0], config[1], config[2], mode) -@with_seed() def test_np_builtin_op_signature(): import inspect from mxnet import _numpy_op_doc @@ -7964,7 +7839,6 @@ def test_np_builtin_op_signature(): assert str(op.__signature__) == str(inspect.signature(_op_from_doc)) -@with_seed() @use_np def test_np_tril_indices(): class TestTrilindices(HybridBlock): @@ -8001,7 +7875,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert same(np_data, mx_data.asnumpy()) 
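Every hunk in this file deletes only the @with_seed() line; the test bodies stay untouched because reseeding now happens once per test through the autouse fixture in conftest.py. A minimal sketch of a migrated test, assuming the fixture has already seeded the np, mx and python RNGs before the body runs (the test name and assertion are illustrative):

    import mxnet as mx
    from mxnet.test_utils import use_np

    @use_np
    def test_example_after_migration():
        # No @with_seed() needed: the autouse fixture is assumed to have
        # already seeded the np, mx and python RNGs for this test function.
        x = mx.np.random.uniform(size=(3, 4)).asnumpy()
        # uniform draws fall in [0, 1), regardless of which seed was picked
        assert ((0.0 <= x) & (x < 1.0)).all()

If such a test fails, the logged seed can be exported as MXNET_TEST_SEED to replay the identical random draw.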
-@with_seed() @use_np def test_np_fill_diagonal(): class TestFillDiagonal(HybridBlock): @@ -8046,7 +7919,6 @@ def hybrid_forward(self, F, x): assert same(np_data, mx_data.asnumpy()) -@with_seed() @use_np def test_np_moveaxis(): class TestMoveaxis(HybridBlock): @@ -8090,7 +7962,6 @@ def hybrid_forward(self, F, x): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_np_rot90(): class TestTRot90(HybridBlock): @@ -8143,7 +8014,6 @@ def hybrid_forward(self, F, a, *args): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_np_hsplit(): class TestHSplit(HybridBlock): @@ -8194,7 +8064,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_dsplit(): class TestDSplit(HybridBlock): @@ -8243,7 +8112,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_einsum(): class TestEinsum(HybridBlock): @@ -8401,7 +8269,6 @@ def dbg(name, data): assert_almost_equal(grad[0][iop], grad[1][iop], rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.skip(reason='Skipped as the test is flaky and the feature causes curand error. Tracked in #18100') def test_np_diagflat(): @@ -8444,7 +8311,6 @@ def hybrid_forward(self,F,a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_pad(): class TestPad(HybridBlock): @@ -8511,7 +8377,6 @@ def hybrid_forward(self,F,A,**kwargs): assert_almost_equal(mx.np.pad(mx_grad, pad_width=pw, mode="constant"), gt_in_grad.asnumpy(), rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_rand(): # Test shapes. @@ -8561,7 +8426,6 @@ def generator_mx(x): return np.random.rand( probs=probs, nsamples=samples, nrepeat=trials) -@with_seed() @use_np def test_np_true_divide(): shapes = [ @@ -8620,7 +8484,6 @@ def test_np_true_divide(): assert_almost_equal(out_mx.asnumpy(), out_np, rtol=1e-3, atol=1e-3, use_broadcast=False) -@with_seed() @use_np def test_np_column_stack(): class TestColumnStack(HybridBlock): @@ -8735,7 +8598,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert_almost_equal(npx_out.asnumpy(), expected_out, rtol=1e-3, atol=1e-5) -@with_seed() @use_np def test_np_share_memory(): ops = [np.shares_memory, np.may_share_memory] @@ -8798,7 +8660,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_quantile(): class TestQuantile(HybridBlock): @@ -8859,7 +8720,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_percentile(): class TestPercentile(HybridBlock): @@ -8922,7 +8782,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_diff(): def np_diff_backward(ograd, n, axis): @@ -8974,7 +8833,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_ediff1d(): def np_diff_backward(size, shape): @@ -9088,7 +8946,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(arg.grad.asnumpy(), np.ones_like(arg), atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_column_stack(): class TestColumnStack(HybridBlock): @@ -9142,7 +8999,6 @@ def g(data): assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.skip(reason='Test 
hangs. Tracked in #18144') def test_np_resize(): @@ -9178,7 +9034,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert_almost_equal(ret.asnumpy(), expected_ret, atol=1e-5, rtol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_diag(): class TestDiag(HybridBlock): @@ -9237,7 +9092,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.parametrize('config', [ [(1, 5), (0, 1)], [(2, 2), (0, 1)], @@ -9320,7 +9174,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_nan_to_num(): def take_ele_grad(ele): @@ -9422,7 +9275,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) -@with_seed() @use_np def test_np_unary_bool_funcs(): def check_unary_func(func): @@ -9529,7 +9381,6 @@ def hybrid_forward(self, F, a): check_unary_func("isfinite") -@with_seed() @use_np def test_np_polyval(): class TestPolyval(HybridBlock): @@ -9596,7 +9447,6 @@ def polyval_grad(p, x): assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_where(): class TestWhere(HybridBlock): @@ -9664,7 +9514,6 @@ def hybrid_forward(self, F, cond, x, y): same(mx_out, np_out) -@with_seed() @use_np def test_np_expand_dims(): class TestExpandDims(HybridBlock): @@ -9718,7 +9567,6 @@ def hybrid_forward(self, F, x): assert_almost_equal(y.asnumpy(), expected, use_broadcast=False) -@with_seed() @use_np @pytest.mark.skip(reason='Test hangs. Tracked in #18144') def test_np_unravel_index(): @@ -9771,7 +9619,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(elem_mx.asnumpy(), elem_np, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_diag_indices_from(): class TestDiag_indices_from(HybridBlock): @@ -9808,7 +9655,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(elem_mx.asnumpy(), elem_np, rtol=rtol, atol=atol) -@with_seed() @use_np def test_np_interp(): class TestInterp(HybridBlock): @@ -9875,7 +9721,6 @@ def hybrid_forward(self, F, xp, fp): assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) -@with_seed() @use_np def test_np_bincount(): class TestBincount(HybridBlock): @@ -9925,7 +9770,6 @@ def hybrid_forward(self, F, a, weights): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) -@with_seed() @use_np @pytest.mark.skip(reason='Test hangs. 
Tracked in #18144') def test_np_empty_like(): @@ -9984,7 +9828,6 @@ def hybrid_forward(self, F, x, *args, **kwargs): assert ret.asnumpy().shape == expected_ret.shape -@with_seed() @use_np @pytest.mark.parametrize('hybridize', [True, False]) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) @@ -10241,7 +10084,6 @@ def get_cross_backward(a, b, axises): check_np_cross(mx_out, a.asnumpy(), b.asnumpy(), axes) -@with_seed() @use_np def test_np_rollaxis(): class TestRollaxis(HybridBlock): @@ -10281,7 +10123,6 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert same(mx_out.asnumpy(), np_out) -@with_seed() @use_np def test_npx_stop_gradient(): class TestStopGradient(HybridBlock): @@ -10310,7 +10151,6 @@ def hybrid_forward(self, F, a): assert_almost_equal(new_grad, old_grad + 1) -@with_seed() @use_np def test_np_elementwise_ops_on_misaligned_input(): a = np.array([1,2,3,4], dtype='float16') @@ -10333,7 +10173,6 @@ def test_np_elementwise_ops_on_misaligned_input(): assert a[3] == 4.0 -@with_seed() @use_np @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @@ -10361,7 +10200,6 @@ def test_np_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways): assert_almost_equal(f, expected) -@with_seed() @use_np @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @@ -10388,7 +10226,6 @@ def test_np_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways mx.nd.waitall() assert_almost_equal(f, expected) -@with_seed() @use_np @pytest.mark.parametrize('num_batch', [1, 2]) @pytest.mark.parametrize('num_channel_data', [4, 8]) diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 65c30292f533..3660d7f4d6d0 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -29,12 +29,11 @@ from mxnet.test_utils import * from mxnet.operator import * from mxnet.base import py_str, MXNetError, _as_list -from common import with_seed, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises +from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises from common import xfail_when_nonstandard_decimal_separator, with_environment import pytest import os -@with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') @pytest.mark.serial def test_rnn_with_new_param(): @@ -75,7 +74,6 @@ def test_rnn_with_new_param(): assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4) -@with_seed() @pytest.mark.serial def test_lstm_dropout(): X = mx.sym.Variable('x') @@ -89,7 +87,6 @@ def test_lstm_dropout(): out = exe.forward(is_train=True) out[0].wait_to_read() -@with_seed() @pytest.mark.serial def test_gru_dropout(): X = mx.sym.Variable('x') @@ -102,7 +99,6 @@ def test_gru_dropout(): out = exe.forward(is_train=True) out[0].wait_to_read() -@with_seed() @pytest.mark.serial def test_rnntanh_dropout(): X = mx.sym.Variable('x') @@ -115,7 +111,6 @@ def test_rnntanh_dropout(): out = exe.forward(is_train=True) out[0].wait_to_read() -@with_seed() @pytest.mark.serial def test_rnnrelu_dropout(): X = mx.sym.Variable('x') @@ -194,7 +189,6 @@ def check_elementwise_sum_with_shape(shape, n): assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5) -@with_seed() @pytest.mark.serial def test_elementwise_sum(): nrepeat = 2 @@ -250,7 +244,6 @@ def check_concat_with_shape(shapes, dimension, skip_second): assert_almost_equal(grad, np_grad + 1) 
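Only the decorator goes away; helpers such as random_seed remain imported (see the test_numpy_interoperability.py hunk above), so a test that needs a locally pinned draw can still get one. A sketch under the assumption that common.random_seed is a context manager that seeds on entry and restores RNG state on exit; the test name and seed value are illustrative:

    import mxnet as mx
    from common import random_seed  # tests/python/unittest/common.py

    def test_locally_pinned_draw():
        # Two draws under the same scoped seed are assumed to be identical;
        # RNG state outside the with-blocks is left to the autouse fixture.
        with random_seed(42):
            a = mx.nd.random.uniform(shape=(2, 2)).asnumpy()
        with random_seed(42):
            b = mx.nd.random.uniform(shape=(2, 2)).asnumpy()
        assert (a == b).all()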
-@with_seed() def test_concat(): for dimension in range(4): n = 2 @@ -307,7 +300,6 @@ def test_concat(): check_concat_with_shape(shapes, dimension - 4, True) check_concat_with_shape(shapes, dimension - 4, False) -@with_seed() def test_slice_channel(): def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis): ins = [] @@ -403,7 +395,6 @@ def test_swapaxes(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_scalarop(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -425,7 +416,6 @@ def test_scalarop(): check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad]) -@with_seed() def test_scalar_pow(): data = mx.symbol.Variable('data') shape = (1, 1) @@ -436,7 +426,6 @@ def test_scalar_pow(): check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp]) -@with_seed() def test_symbol_pow(): shape = (1, 1) @@ -456,7 +445,6 @@ def test_symbol_pow(): check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir]) -@with_seed() def test_fully_connected(): # Create data of given shape as a uniform distribution centered on 0.0 def random_data(shape, dtype=np.float32): @@ -482,7 +470,6 @@ def random_data(shape, dtype=np.float32): #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res}) -@with_seed() def test_pow_fn(): shape = (3, 4) exp = mx.symbol.Variable("exp") @@ -493,7 +480,6 @@ def test_pow_fn(): check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x]) -@with_seed() def test_relu(): def frelu(x): return np.maximum(x, 0.0) @@ -516,7 +502,6 @@ def frelu_grad(x): # NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues, # the analytical checks are still performed on each and every data type to verify the correctness. -@with_seed() def test_leaky_relu(): def fleaky_relu(x, act_type, slope=0.25): neg_indices = x < 0 @@ -557,7 +542,6 @@ def fleaky_relu_grad(grad, x, y, act_type, slope=0.25): # NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues, # the analytical checks are still performed on each and every data type to verify the correctness. 
-@with_seed() def test_prelu(): def fprelu(x, gamma): pos_indices = x > 0 @@ -619,7 +603,6 @@ def fprelu_grad(x, y, gamma): check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)], [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype) -@with_seed() def test_selu(): alpha = 1.6732632423543772848170429916717 lamb = 1.0507009873554804934193349852946 @@ -650,7 +633,6 @@ def fselu_grad(grad, x, y): check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype) -@with_seed() def test_gelu(): CUBE_CONSTANT = 0.044715 ROOT_TWO_OVER_PI = 0.7978845608028654 @@ -683,7 +665,6 @@ def fgelu_grad(grad, x, y): check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype) -@with_seed() def test_sigmoid(): def fsigmoid(a): return np.divide(1.0, (1.0 + np.exp(-a))) @@ -696,7 +677,6 @@ def fsigmoid(a): check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)]) -@with_seed() def test_shape_array(): for i in range(1,6): shape = rand_shape_nd(i) @@ -714,7 +694,6 @@ def test_shape_array(): same(yo, ya) assert_almost_equal(xg, np.zeros_like(xg.asnumpy())) -@with_seed() def test_size_array(): for i in range(1,6): shape = rand_shape_nd(i) @@ -732,7 +711,6 @@ def test_size_array(): same(yo, ya) assert_almost_equal(xg, np.zeros_like(xg.asnumpy())) -@with_seed() def test_hard_sigmoid(): def fhardsigmoid(a, alpha=0.2, beta=0.5): return np.maximum(np.zeros(a.shape, dtype=a.dtype), @@ -764,7 +742,6 @@ def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5): check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype) check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype) -@with_seed() def test_softsign(): def fsoftsign(a): return np.divide(a, (1.0 + np.abs(a))) @@ -780,7 +757,6 @@ def fsoftsign_grad(a): check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad]) -@with_seed() def test_binary_logic(): def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True): x = mx.symbol.Variable("x") @@ -837,7 +813,6 @@ def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True): x_shape=(1, 10), y_shape=(10, 1), test_scalar=False) -@with_seed() def test_unary_logic(): def reference(a, dtype): return np.logical_not(a).astype(dtype) @@ -853,7 +828,6 @@ def reference(a, dtype): assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype)) -@with_seed() def test_embedding(): in_dim = 10 out_dim = 4 @@ -885,7 +859,6 @@ def test_embedding(): # check ops handle duplicate input correctly. 
-@with_seed() def test_binary_op_duplicate_input(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -904,7 +877,6 @@ def test_binary_op_duplicate_input(): assert_almost_equal(arr_grad, 2.0 * data_tmp) -@with_seed() def test_sign(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -929,7 +901,6 @@ def test_sign(): assert_almost_equal(arr_grad, npout_grad) -@with_seed() def test_round_ceil_floor(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -947,7 +918,6 @@ def test_round_ceil_floor(): assert_almost_equal(out, npout) -@with_seed() def test_trunc(): data_tmp = np.random.rand(3, 4) * 10 - 5 arr_data = mx.nd.array(data_tmp) @@ -964,7 +934,6 @@ def test_trunc(): assert_almost_equal(out, npout) -@with_seed() def test_rsqrt_cos_sin(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -989,7 +958,6 @@ def test_rsqrt_cos_sin(): assert_almost_equal(arr_grad, npout_grad) -@with_seed() def test_maximum_minimum(): data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') @@ -1027,7 +995,6 @@ def test_maximum_minimum(): assert_almost_equal(arr_grad2, npout_grad2) -@with_seed() def test_maximum_minimum_scalar(): data1 = mx.symbol.Variable('data') shape = (3, 4) @@ -1059,7 +1026,6 @@ def test_maximum_minimum_scalar(): assert_almost_equal(arr_grad1, npout_grad1) -@with_seed() def test_abs(): data = mx.symbol.Variable('data') shape = (3, 4) @@ -1201,7 +1167,6 @@ def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, targ assert out_shapes[0] == (input_shape[0], 5) + target_shape -@with_seed() @pytest.mark.serial def test_deconvolution(): # 2D @@ -1299,7 +1264,6 @@ def test_deconvolution(): pad = (3,) ) -@with_seed() def test_deconvolution_forward_with_bias(): """Check if deconvolution forward can work well with bias=True """ @@ -1363,7 +1327,6 @@ def _init_bilinear(arr, f): assert out.shape == data_shape[:2] + target_shape -@with_seed() def test_nearest_upsampling(): for root_scale in [1,2,3]: for scale in [1,2,3]: @@ -1373,7 +1336,6 @@ def test_nearest_upsampling(): check_nearest_upsampling_with_shape(shapes, scale, root_scale) -@with_seed() def test_bilinear_upsampling(): rootscale = [2,3] scales = [1,2,3] @@ -1389,7 +1351,6 @@ def test_bilinear_upsampling(): weight_shape = (1, num_filter, kernel, kernel) check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter) -@with_seed() def test_batchnorm_training(): def check_batchnorm_training(stype): for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]: @@ -1462,7 +1423,6 @@ def check_batchnorm_training(stype): @xfail_when_nonstandard_decimal_separator -@with_seed() @pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm']) @pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4), (4, 6, 4, 5), (4, 5, 6, 4, 5)]) @@ -1621,7 +1581,6 @@ def _test_batchnorm_impl(axis, data_grad_req, gamma_grad_req, beta_grad_req) -@with_seed() def test_groupnorm(): acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'} def x_hat_helper(x, num_groups, eps): @@ -1712,7 +1671,6 @@ def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps): atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype) -@with_seed() def test_convolution_grouping(): for dim in [1, 2, 3]: num_filter = 4 @@ -1746,7 +1704,6 @@ def test_convolution_grouping(): @pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052") -@with_seed() def test_depthwise_convolution(): for dim in [1,2]: for num_base in [1, 4, 16, 32, 64]: @@ -1790,7 +1747,6 @@ def 
test_depthwise_convolution(): assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3) -@with_seed() def test_convolution_independent_gradients(): # NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603. # GPU context will be enabled after figuring out the possible issue tracked at @@ -2022,7 +1978,6 @@ def reduce_op(shape, x): assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol) -@with_seed() def test_binary_op(): a = mx.sym.Variable('a') b = mx.sym.Variable('b') @@ -2084,7 +2039,6 @@ def test_bneq(a, b): test_bpow(a, b) test_bneq(a, b) -@with_seed() def test_broadcast_binary_op(): def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol): """This function ensures that checking the numerical gradient of @@ -2198,7 +2152,6 @@ def test_bxor(a, b): test_bor(a, b) test_bxor(a, b) -@with_seed() def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False): dim = len(dil) assert(len(kernel_shape) == dim) @@ -2268,7 +2221,6 @@ def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001) -@with_seed() def test_convolution_dilated_impulse_response(): # 1D for dil in [ (1,), (2,), (3,) ]: @@ -2284,7 +2236,6 @@ def test_convolution_dilated_impulse_response(): test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) -@with_seed() @pytest.mark.serial @pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [ ((2, 3, 5, 5), (0, -1), False, (2, 75)), @@ -2353,7 +2304,6 @@ def test_reshape_new(src_shape, shape_args, reverse, dst_shape): 'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) -@with_seed() def test_reshape_old(): net = mx.sym.Variable("data") net = mx.sym.Reshape(net, target_shape=(2, 0)) @@ -2373,7 +2323,6 @@ def test_reshape_old(): assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7))) -@with_seed() def test_reshape_like(): def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape): lhs = mx.sym.Variable("lhs") @@ -2426,7 +2375,6 @@ def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shap assert(output_shape[0] == (30,20,2)) -@with_seed() def test_reduce(): sample_num = 500 def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0, @@ -2535,7 +2483,6 @@ def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, mx.symbol.norm, test_exclude=False, test_none_axis=test_none) -@with_seed() def test_broadcast(): sample_num = 200 for i in range(sample_num): @@ -2579,7 +2526,6 @@ def test_broadcasting_ele(sym_bcast): test_broadcasting_ele(sym_bcast_like) -@with_seed() def test_transpose(): for ndim in range(1, 10): for t in range(5): @@ -2595,7 +2541,6 @@ def test_transpose(): assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) -@with_seed() @pytest.mark.serial def test_pseudo2dtranspose(): def getTwoInts(mn, mx): @@ -2619,7 +2564,6 @@ def getTranspAxes(ndim): assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy()) -@with_seed() @pytest.mark.serial def test_big_transpose(): n = [1] @@ -2637,7 +2581,6 @@ def test_big_transpose(): assert_allclose(x_np, z.asnumpy().astype('uint8')) -@with_seed() @pytest.mark.serial def test_larger_transpose(): x = mx.nd.random.normal(shape=(50,51)) @@ -2645,7 +2588,6 @@ def test_larger_transpose(): assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) -@with_seed() 
def test_expand_dims(): for ndim in range(1, 6): for axis in range(-ndim + 1, ndim): @@ -2657,7 +2599,6 @@ def test_expand_dims(): assert_allclose(x1.shape, y1.shape) -@with_seed() def test_crop(): for ndim in range(1, 6): for t in range(5): @@ -2690,7 +2631,6 @@ def test_crop(): check_numeric_gradient(vy, [x.asnumpy()]) -@with_seed() def test_slice_axis(): for ndim in range(1, 6): shape = np.random.randint(1, 11, size=(ndim,)) @@ -2733,7 +2673,6 @@ def test_slice_axis(): xx[idx] = x.asnumpy()[idx] assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5) -@with_seed() def test_slice_like(): for ndim in range(1, 6): from_shape = np.random.randint(1, 11, size=(ndim,)) @@ -2774,7 +2713,6 @@ def test_slice_like(): assert_allclose(xx, xgrad.asnumpy()) assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy()) -@with_seed() def test_slice_like_different_types(): x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], @@ -2788,7 +2726,6 @@ def test_slice_like_different_types(): z = mx.nd.slice_like(x, y) assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]]) -@with_seed() def test_reshape_like_different_types(): x = mx.nd.zeros((2, 3)) @@ -2798,7 +2735,6 @@ def test_reshape_like_different_types(): z = mx.nd.reshape_like(x, y) assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]]) -@with_seed() def test_broadcast_like_different_types(): x = mx.nd.zeros((2, 1)) y = mx.nd.ones((2, 2)) @@ -2808,7 +2744,6 @@ def test_broadcast_like_different_types(): assert_allclose(z.asnumpy(), [[0,0],[0,0]]) assert x.dtype == z.dtype -@with_seed() def test_flip(): for ndim in range(1, 6): for t in range(5): @@ -2820,7 +2755,6 @@ def test_flip(): assert_allclose(x.asnumpy()[idx], y.asnumpy()) -@with_seed() def test_stn(): import sys np.set_printoptions(threshold=sys.maxsize) @@ -2910,7 +2844,6 @@ def test_stn_valid_sampling(): ) + target_shape)) -@with_seed() def test_dot(): ctx = default_context() dtypes = ['float32', 'float64'] @@ -2985,7 +2918,6 @@ def dot_sym_xT_yT(data_type): check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3) -@with_seed() def test_batch_dot(): ctx = default_context() dtypes = ['float32', 'float64'] @@ -3197,7 +3129,6 @@ def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2 assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4) -@with_seed() def test_correlation(): def test_infer_type(dtype): a = mx.sym.Variable('a') @@ -3242,7 +3173,7 @@ def test_infer_type(dtype): unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype) # Seed set because the test is not robust enough to operate on random data -@with_seed(1234) +@pytest.mark.seed(1234) def test_roipooling(): data = mx.symbol.Variable(name='data') @@ -3279,7 +3210,6 @@ def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"): check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2) -@with_seed() def test_pad(): ctx = default_context() shape1 = (2, 3, 3, 5) @@ -3335,7 +3265,6 @@ def check_instance_norm_with_shape(shape, xpu): numeric_eps=1e-2, rtol=1e-2, atol=1e-2) -@with_seed() def test_instance_normalization(): check_instance_norm_with_shape((1, 1, 1), default_context()) check_instance_norm_with_shape((2, 1, 2), default_context()) @@ -3376,7 +3305,6 @@ def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10): check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3) -@with_seed() def test_l2_normalization(): 
for dtype in ['float16', 'float32', 'float64']: for mode in ['channel', 'spatial', 'instance']: @@ -3482,7 +3410,6 @@ def npy_layer_norm_grad(data, gamma, out_grad, axis, eps): gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps) -@with_seed() def test_norm(): try: import scipy @@ -3659,14 +3586,12 @@ def check_sequence_func(ftype, mask_value=0, axis=0): numeric_eps=1e-3, rtol=1e-2, atol=1E-4) -@with_seed() @pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395") def test_sequence_last(): check_sequence_func("last", axis=0) check_sequence_func("last", axis=1) -@with_seed() def test_sequence_mask(): check_sequence_func("mask", axis = 0, mask_value=-2.3) check_sequence_func("mask", axis = 1, mask_value=0.3) @@ -3737,7 +3662,6 @@ def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False): assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5) -@with_seed() def test_sequence_reverse(): check_sequence_func("reverse", axis=0) check_sequence_reverse(mx.cpu()) @@ -3811,7 +3735,6 @@ def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_num assert_almost_equal(arr_grad, npout_grad) -@with_seed() def test_special_functions_using_scipy(): try: from scipy import special as scipy_special @@ -3851,7 +3774,6 @@ def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_in assert_almost_equal(out, npout) -@with_seed() def test_mathematical(): # rsqrt mathematical_core("rsqrt", @@ -3939,7 +3861,6 @@ def test_mathematical(): rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x)) -@with_seed() def test_special_functions_using_scipy(): try: from scipy import special as scipy_special @@ -3956,7 +3877,6 @@ def test_special_functions_using_scipy(): lambda x: scipy_special.psi(x), 0.5, 0.5) -@with_seed() def test_clip(): data = mx.symbol.Variable('data') shape = (30, 30) @@ -3967,7 +3887,6 @@ def test_clip(): [np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])]) -@with_seed() def test_init(): def test_basic_val_init(sym_func, np_func, shape, dtype): x = sym_func(shape=shape, dtype=dtype) @@ -4028,7 +3947,6 @@ def test_arange_like_without_axis(): test_arange_like_without_axis() -@with_seed() def test_order(): ctx = default_context() @@ -4160,7 +4078,6 @@ def get_large_matrix(): is_ascend=True)]) -@with_seed() def test_blockgrad(): a = mx.sym.Variable('a') b = mx.sym.BlockGrad(a) @@ -4171,7 +4088,6 @@ def test_blockgrad(): exe.backward() # No error if BlockGrad works -@with_seed() def test_take_autograd_req(): row_len = 2 col_len = 8 @@ -4197,7 +4113,6 @@ def test_take_autograd_req(): x.backward() assert_almost_equal(np.ones(sc.grad.shape), sc.grad) -@with_seed() @pytest.mark.parametrize('mode,out_of_range', [ ('clip', True), ('wrap', True), @@ -4283,7 +4198,6 @@ def grad_helper(grad_in, axis, idx): assert_almost_equal(exe.grad_dict['a'], grad_in) -@with_seed() def test_grid_generator(): # transform_type = affine test_case = [(20,21),(4,3),(6,12),(15,17)] @@ -4352,7 +4266,6 @@ def test_grid_generator(): assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5) -@with_seed() def test_index2d(): for _ in range(30): n = np.random.randint(1, 100) @@ -4363,7 +4276,6 @@ def test_index2d(): assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()]) -@with_seed() def test_cast(): for srctype in [np.int32, np.float32, np.float16]: for dsttype in [np.float32, np.int32, np.float16]: @@ 
-4400,7 +4312,6 @@ def get_cast_op_data(): yield np.nan # Test requires all platforms to round float32->float16 with same round-to-nearest-even policy. -@with_seed() def test_cast_float32_to_float16(): input_np = np.array(list(get_cast_op_data())).astype(np.float32) # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed @@ -4427,7 +4338,6 @@ def check_cast(op, input_np, expected_output): check_cast(mx.sym.amp_cast, input_np, expected_output) -@with_seed() def test_amp_multicast(): if default_context().device_type == 'cpu': return @@ -4467,7 +4377,6 @@ def check_amp_multicast(input_np, expected_output): check_amp_multicast(input_np, expected_output) -@with_seed() def test_all_finite(): data = mx.sym.Variable("data", dtype=np.float32) data2 = mx.sym.Variable("data2", dtype=np.float32) @@ -4495,7 +4404,6 @@ def test_all_finite(): assert sym_output[0] == 1 -@with_seed() def test_repeat(): def test_repeat_forward(): ndim_max = 6 # max number of dims of the ndarray @@ -4570,7 +4478,6 @@ def test_repeat_numeric_gradient(): test_repeat_numeric_gradient() -@with_seed() def test_reverse(): data = mx.symbol.Variable('data') shape = (5, 5, 5) @@ -4582,7 +4489,6 @@ def test_reverse(): check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]]) -@with_seed() def test_tile(): def test_normal_case(): ndim_min = 1 @@ -4676,7 +4582,6 @@ def test_invalid_reps(): test_invalid_reps() -@with_seed() def test_one_hot(): def test_normal_case(index_type=np.int32): ndim_max = 6 @@ -4736,7 +4641,6 @@ def test_zero_depth(): test_zero_depth() -@with_seed() def test_where(): def get_forward_expected_output(condition, x, y): original_shape = x.shape @@ -4898,7 +4802,6 @@ def test_1d_cond(): test_1d_cond() -@with_seed() def test_softmin(): for ndim in range(1, 5): for dtype in [np.float16, np.float32, np.float64]: @@ -4918,7 +4821,6 @@ def test_softmin(): check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype) -@with_seed() def test_new_softmax(): for ndim in range(1, 5): shape = np.random.randint(1, 5, size=ndim) @@ -4934,7 +4836,6 @@ def test_new_softmax(): check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3) -@with_seed() def test_softmax_with_temperature(): for ndim in range(1, 5): shape = np.random.randint(1, 5, size=ndim) @@ -4947,7 +4848,6 @@ def test_softmax_with_temperature(): check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3) check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3) -@with_seed() def test_log_softmax(): for ndim in range(1, 5): for _ in range(5): @@ -4972,7 +4872,6 @@ def softmax_forward(input_data, true_output): softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0])) -@with_seed() @with_environment('MXNET_SAFE_ACCUMULATION', '1') def test_softmax_dtype(): def check_dtypes_almost_equal(op_name, @@ -5011,7 +4910,6 @@ def check_dtypes_almost_equal(op_name, 'float32', 'float64', 'float64') -@with_seed() def test_softmax_with_length(): def np_softmax_with_length(data, length): res = np.zeros(data.shape) @@ -5044,7 +4942,6 @@ def np_softmax_with_length(data, length): rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy") -@with_seed() def test_pick(): def test_pick_helper(index_type=np.int32): for mode in ['clip', 'wrap']: @@ -5112,7 +5009,6 @@ def check_ctc_loss(acts, labels, loss_truth, contrib=False): # test grad check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], 
rtol=0.05, atol=1e-3) -@with_seed() def test_ctc_loss(): # Test 1: check that batches are same + check against Torch WarpCTC acts = np.array([ @@ -5142,7 +5038,6 @@ def test_ctc_loss(): for contrib in [False, True]: check_ctc_loss(acts2, labels3, true_loss, contrib=contrib) -@with_seed() def test_ctc_loss_with_large_classes(): ctx = default_context() num_classes = 6000 @@ -5162,7 +5057,6 @@ def test_ctc_loss_with_large_classes(): expected_loss = np.array([688.02826, 145.34462]) assert_almost_equal(loss, expected_loss) -@with_seed() def test_ctc_loss_grad(): def check_ctc_loss_grad(blank_label, contrib=False): # from tf vocab_size = 5 @@ -5255,7 +5149,6 @@ def check_ctc_loss_grad(blank_label, contrib=False): # from tf for label in ['first', 'last']: check_ctc_loss_grad(label, contrib=contrib) -@with_seed() def test_quantization_op(): min0 = mx.nd.array([0.0]) max0 = mx.nd.array([1.0]) @@ -5270,7 +5163,6 @@ def test_quantization_op(): assert same(qa.asnumpy(), qa_real.asnumpy()) assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2) -@with_seed() def test_index_copy(): x = mx.nd.zeros((5,3)) t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]]) @@ -5296,7 +5188,6 @@ def test_index_copy(): assert same(t.grad.asnumpy(), t_grad.asnumpy()) -@with_seed() def test_boolean_mask(): data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]]) index = mx.nd.array([0, 1, 0]) @@ -5350,7 +5241,6 @@ def test_boolean_mask(): assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')]) -@with_seed() def test_div_sqrt_dim(): data_tmp = np.random.normal(0, 1, (5, 10, 8)) data = mx.symbol.Variable('data') @@ -5372,7 +5262,6 @@ def is_fd_problem_input(x): return abs(x) < eps/2 or expected_relative_error(x) > rtol return np.vectorize(is_fd_problem_input) -@with_seed() def test_reciprocal_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 @@ -5388,7 +5277,6 @@ def test_reciprocal_op(): check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)]) -@with_seed() def test_cbrt_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 @@ -5403,7 +5291,6 @@ def test_cbrt_op(): check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)]) -@with_seed() def test_rcbrt_op(): data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5 @@ -5419,7 +5306,6 @@ def test_rcbrt_op(): check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)]) -@with_seed() def test_custom_op(): class Sqr(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): @@ -5616,7 +5502,6 @@ def create_operator(self, ctx, shapes, dtypes): assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32)) @pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467") -@with_seed() def test_custom_op_fork(): # test custom operator fork # see https://github.com/apache/incubator-mxnet/issues/14396 @@ -5680,7 +5565,6 @@ def infer_shape(self, in_shape): def create_operator(self, ctx, shapes, dtypes): return Dot() -@with_seed() def test_custom_op_exc(): # test except handling # see https://github.com/apache/incubator-mxnet/pull/14693 @@ -5740,7 +5624,6 @@ def f(in_data, out_data): pytest.raises(MXNetError, custom_exc4) -@with_seed() def test_psroipooling(): for num_rois in [1, 2]: for num_classes, num_group in itertools.product([2, 3], [2, 3]): @@ -5764,7 +5647,6 @@ def test_psroipooling(): grad_nodes=grad_nodes) -@with_seed() def test_psroipooling_with_type(): arg_params = { 'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])} @@ -5789,7 +5671,6 
@@ def test_psroipooling_with_type(): 'psroipool_rois': 'null'}, arg_params=arg_params) -@with_seed() def test_deformable_convolution(): for num_batch in [1, 2]: for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]): @@ -5881,7 +5762,6 @@ def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, return output_offset @pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713") -@with_seed() def test_deformable_psroipooling(): sample_per_part = 4 trans_std = 0.1 @@ -6046,7 +5926,6 @@ def np_random_data(shape, dtype=np.float32): check_grad(test_gemm, [a2, b2]) # Test gemm separately from other la-operators. -@with_seed() def test_gemm(): _gemm_test_helper(np.float64, True) with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'): @@ -6087,7 +5966,6 @@ def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32): # @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced # tracked at https://github.com/apache/incubator-mxnet/issues/11718 @xfail_when_nonstandard_decimal_separator -@with_seed() def test_laop(): dtype = np.float64 rtol_fw = 1e-7 @@ -6255,7 +6133,6 @@ def _syevd_combined_symbol(a): transpose_b=False, name='Ut_L_U') return mx.sym.Group([u_ut, ut_lam_u]) -@with_seed() def test_laop_2(): dtype = np.float64 rtol_fw = 1e-7 @@ -6379,7 +6256,7 @@ def _syevd_backward(grad_u, grad_l, u, l): return np.dot(temp3, u) # Seed set because the test is not robust enough to operate on random data -@with_seed(1896893923) +@pytest.mark.seed(1896893923) def test_laop_3(): # Currently disabled on GPU as syevd needs cuda8 # and MxNet builds use cuda 7.5 @@ -6449,7 +6326,6 @@ def test_laop_3(): # @piyushghai - Removing the fixed seed for this test. # Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721 -@with_seed() def test_laop_4(): # Currently disabled on GPU as syevd needs cuda8 # and MxNet builds use cuda 7.5 @@ -6523,7 +6399,6 @@ def test_laop_5(): check_numeric_gradient(test_trian, [data_in]) # Tests for linalg.inverse -@with_seed() @pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975") def test_laop_6(): dtype = np.float64 @@ -6572,7 +6447,6 @@ def test_laop_6(): check_fw(test_logabsdet, [a], [r2]) check_grad(test_logabsdet, [a]) -@with_seed() def test_stack(): for _ in range(100): ndim = random.randint(1, 5) @@ -6591,7 +6465,6 @@ def test_stack(): ## TODO: test fails intermittently when cudnn on. temporarily disabled cudnn until gets fixed. ## tracked at https://github.com/apache/incubator-mxnet/issues/14288 -@with_seed() def test_dropout(): def zero_count(array, ratio): zeros = 0 @@ -6762,7 +6635,6 @@ def check_passthrough(ratio, shape, cudnn_off=True): @pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. 
tracked at https://github.com/apache/incubator-mxnet/issues/11290") -@with_seed() def test_scatter_gather_nd(): def check(data, idx): data.attach_grad() @@ -6801,7 +6673,6 @@ def check(data, idx): idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32') assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum()) -@with_seed() def test_gather_nd_check_bound(): def _test_gather_nd_exception(data, indices): output = mx.nd.gather_nd(data, indices).asnumpy() @@ -6877,7 +6748,7 @@ def np_smooth_l1_grad(x, sigma): # - Backward: Comparison to NumPy (several dtype) # - Finite difference tests (only dtype = float64) # Seed set because the test is not robust enough to operate on random data -@with_seed(192837465) +@pytest.mark.seed(192837465) def test_unary_math_operators(): have_scipy = True try: @@ -7088,7 +6959,6 @@ def finite_diff_binary_op( # - Forward: Comparison to NumPy (several dtype) # - Backward: Comparison to NumPy (several dtype) # - Finite difference tests (only dtype = float64) -@with_seed() def test_binary_math_operators(): shape=(9, 10) dtype_l = [np.float64, np.float32, np.float16] @@ -7127,7 +6997,6 @@ def test_binary_math_operators(): name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd, num_eps) -@with_seed() @pytest.mark.serial def test_slice(): def test_slice_forward_backward(a, index): @@ -7203,7 +7072,6 @@ def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape): check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1)) -@with_seed() def test_float16_min_max(): """Test for issue: https://github.com/apache/incubator-mxnet/issues/9007""" a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16') @@ -7212,7 +7080,6 @@ def test_float16_min_max(): assert np.finfo('float16').max == mx.nd.max(a).asscalar() -@with_seed() @mx.use_np_shape def test_zero_size_min_max(): def min(): @@ -7227,7 +7094,6 @@ def max(): pytest.raises(MXNetError, max) -@with_seed() def test_squeeze_op(): def check_squeeze_op(shape, axis=None): data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape) @@ -7261,7 +7127,6 @@ def check_squeeze_op(shape, axis=None): test = mx.sym.squeeze(data, axis=(2, 4)) check_numeric_gradient(test, [data_tmp]) -@with_seed() @pytest.mark.serial def test_adaptive_avg_pool_op(): def py_adaptive_avg_pool(x, height, width): @@ -7304,7 +7169,6 @@ def check_adaptive_avg_pool_op(shape, output_height, output_width=None): for j in range(1, 11): check_adaptive_avg_pool_op(shape, i, j) -@with_seed() def test_bilinear_resize_op(): def py_bilinear_resize(x, outputHeight, outputWidth): batch, channel, inputHeight, inputWidth = x.shape @@ -7614,7 +7478,6 @@ def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n): check_forward(1000, 500) check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n) -@with_seed() def test_quadratic_function(): def f(x, a, b, c): return a * x**2 + b * x + c @@ -7717,12 +7580,10 @@ def getRandom(base, percent = 1.): if expected != result[0] or num_ctx > 1 and expected != result[1]: assert False -@with_seed() @pytest.mark.serial def test_allclose_function(): allclose_function([default_context()]) -@with_seed() def test_histogram(): def f(x, bins=10, range=None): return np.histogram(x, bins, range=range) @@ -7755,7 +7616,6 @@ def f(x, bins=10, range=None): assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False) -@with_seed() @pytest.mark.skip(reason="test fails intermittently. 
temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915") def test_activation(): shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)] @@ -7803,7 +7663,6 @@ def test_activation(): finite_diff_unary_op( name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps) -@with_seed() @pytest.mark.serial def test_ravel(): # be aware that check_symbolic_forward will use float type internally @@ -7828,7 +7687,6 @@ def test_ravel(): check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data]) -@with_seed() def test_unravel_index(): unravel_shape = (2, 10) unravel_size = np.prod(unravel_shape) @@ -7852,7 +7710,6 @@ def test_context_num_gpus(): raise e -@with_seed() @pytest.mark.serial def test_op_roi_align(): T = np.float32 @@ -8024,7 +7881,6 @@ def test_roi_align_autograd(sampling_ratio=0): test_roi_align_value(position_sensitive=True) test_roi_align_autograd() -@with_seed() def test_op_rroi_align(): T = np.float32 @@ -8162,7 +8018,6 @@ def test_rroi_align_value(sampling_ratio=-1): test_rroi_align_value() test_rroi_align_value(sampling_ratio=2) -@with_seed() def test_diag(): # Test 2d input @@ -8261,7 +8116,6 @@ def test_diag(): diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1) check_numeric_gradient(diag_sym, [a_np]) -@with_seed() @pytest.mark.serial def test_depthtospace(): def f(x, blocksize): @@ -8312,7 +8166,6 @@ def test_invalid_block_size(): test_invalid_space_dim() test_invalid_block_size() -@with_seed() @pytest.mark.serial def test_spacetodepth(): def f(x, blocksize): @@ -8365,7 +8218,6 @@ def test_invalid_depth_dim(): test_invalid_depth_dim() -@with_seed() def test_softmax_cross_entropy(): def f_sm_ce(data, label): return np.sum(-np.log(data) * label) @@ -8383,7 +8235,6 @@ def f_sm_ce(data, label): check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5) -@with_seed() def test_split_v2(): dim = random.randint(2, 6) shape = rand_shape_nd(dim) @@ -8402,7 +8253,6 @@ def test_split_v2(): check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)]) -@with_seed() def test_moments(): dim = random.randint(2, 5) shape = rand_shape_nd(dim, dim=5) @@ -8430,7 +8280,6 @@ def test_moments(): check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4) -@with_seed() def test_invalid_kernel_size(): invalid_kernel_size = 28 assert_exception( @@ -8440,7 +8289,6 @@ def test_invalid_kernel_size(): mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=invalid_kernel_size) -@with_seed() def test_valid_kernel_size(): valid_kernel_size = 9 mx.nd.Correlation( @@ -8448,7 +8296,6 @@ def test_valid_kernel_size(): mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=valid_kernel_size) -@with_seed() def test_valid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) @@ -8464,7 +8311,6 @@ def test_valid_max_pooling_pad_type_same(): pooling_convention="same") assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2]) -@with_seed() def test_invalid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) @@ -8483,7 +8329,6 @@ def test_invalid_max_pooling_pad_type_same(): pooling_convention="same") -@with_seed() @pytest.mark.serial def test_image_normalize(): # Part 1 - Test 3D input with 3D mean/std @@ -8598,7 +8443,6 @@ def test_image_normalize(): # check backward using finite difference check_numeric_gradient(img_norm_sym, 
[data_in_4d], atol=0.001) -@with_seed() @pytest.mark.serial def test_index_array(): def test_index_array_default(): @@ -8666,7 +8510,6 @@ def test_index_array_select_axes_zero_size(): test_index_array_select_axes_zero_size() -@with_seed() def test_scalar_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=()) assertRaises(MXNetError, mx.nd.ones, shape=()) @@ -8676,7 +8519,6 @@ def test_scalar_tensor_creation(): assert same(data_mx.asnumpy(), data_np) -@with_seed() def test_zero_size_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0)) assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0)) @@ -8686,7 +8528,6 @@ def test_zero_size_tensor_creation(): assert same(data_mx.asnumpy(), data_np) -@with_seed() def test_concat_with_zero_size_tensor(): with mx.np_shape(): data1 = mx.nd.ones((0, 8, 12)) @@ -8702,7 +8543,6 @@ def test_concat_with_zero_size_tensor(): assert ret.shape == (0, 12, 10) -@with_seed() def test_np_shape_decorator(): @mx.use_np_shape def check_scalar_one(): @@ -8734,7 +8574,6 @@ def check_concat(shape1, shape2, axis): check_concat((8, 0, 0), (8, 0, 0), 2) -@with_seed() def test_add_n(): data_shape = (2, 2) input_num = 5 @@ -8782,7 +8621,6 @@ def test_transpose_infer_shape_mixed(): assert(y[0].shape == (2,3)) -@with_seed() def test_sample_normal_default_shape(): # Test case from https://github.com/apache/incubator-mxnet/issues/16135 s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5])) @@ -8998,7 +8836,6 @@ def convert_bias(F, q_bias, k_bias, v_bias, num_heads): assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3) -@with_seed() @assert_raises_cuda_not_satisfied(min_version='9.1') @pytest.mark.serial def test_multihead_attention_selfatt(): @@ -9168,7 +9005,6 @@ def convert_bias(F, k_bias, v_bias, num_heads): assert(grads_orig[k].shape == grads_opti[k].shape) assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3) -@with_seed() @assert_raises_cuda_not_satisfied(min_version='9.1') @pytest.mark.serial def test_multihead_attention_encdec(): @@ -9179,7 +9015,6 @@ def test_multihead_attention_encdec(): for dtype in dtypes: check_multihead_attention_encdec(dtype=dtype) -@with_seed() @pytest.mark.serial def test_im2col_col2im(): def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0): @@ -9345,7 +9180,6 @@ def test_elemwise_sum_for_gradient_accumulation(): assert stored_grad['write'] == stored_grad['add'] assert stored_grad['write'] == 2 * nrepeat -@with_seed() def test_elementwise_ops_on_misaligned_input(): a = mx.nd.array([1,2,3,4], dtype='float16') b = mx.nd.array([1,2,3,4], dtype='float16') @@ -9366,7 +9200,6 @@ def test_elementwise_ops_on_misaligned_input(): mx.nd.waitall() assert a[3].asscalar() == 4.0 -@with_seed() @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @pytest.mark.parametrize('both_ways', [False, True]) @@ -9392,7 +9225,6 @@ def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways): mx.nd.waitall() assert_almost_equal(f, expected) -@with_seed() @pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64']) @pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10]) @pytest.mark.parametrize('both_ways', [False, True]) diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py index 74b4de776f8a..b0e29e32e0d5 100644 --- a/tests/python/unittest/test_optimizer.py +++ b/tests/python/unittest/test_optimizer.py @@ -25,9 +25,8 @@ import pytest import math 
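# A minimal sketch of the kind of autouse fixture that makes the removed
# @with_seed decorators redundant and that honors the @pytest.mark.seed(...)
# markers used above for tests needing a fixed seed. The names below mirror
# the patch (function_scope_seed, MXNET_TEST_SEED), but this is an
# illustrative approximation, not the project's actual conftest.py code.
import os
import random

import mxnet as mx
import numpy as np
import pytest


@pytest.fixture(autouse=True)
def function_scope_seed(request):
    """Seed python/np/mx RNGs per test so any failure is reproducible."""
    marker = request.node.get_closest_marker('seed')
    if marker is not None:
        seed = marker.args[0]                      # e.g. @pytest.mark.seed(1234)
    elif 'MXNET_TEST_SEED' in os.environ:
        seed = int(os.environ['MXNET_TEST_SEED'])  # reproduce a logged failure
    else:
        seed = np.random.randint(0, np.iinfo(np.int32).max)
    old_state = random.getstate()
    random.seed(seed)
    mx.random.seed(seed)
    np.random.seed(seed)
    yield  # run the test with the seeded RNGs
    random.setstate(old_state)  # restore the python RNG state afterwards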
from mxnet.test_utils import * -from common import with_seed, retry, xfail_when_nonstandard_decimal_separator +from common import retry, xfail_when_nonstandard_decimal_separator -@with_seed() def test_learning_rate(): o1 = mx.optimizer.Optimizer(learning_rate=0.01) o1.set_learning_rate(0.2) @@ -44,7 +43,6 @@ def test_learning_rate(): assert o3.learning_rate == 1024 -@with_seed() def test_learning_rate_expect_user_warning(): lr_s = lr_scheduler.FactorScheduler(step=1) o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3) @@ -54,7 +52,6 @@ def test_learning_rate_expect_user_warning(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sgd(): opt1 = mx.optimizer.SGD opt2 = mx.optimizer.SGD @@ -160,7 +157,6 @@ def step(self, indices, weights, grads, states): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sparse_sgd(): opt1 = PySparseSGD opt2 = mx.optimizer.SGD @@ -184,7 +180,6 @@ def test_sparse_sgd(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_std_sparse_sgd(): opt1 = mx.optimizer.SGD opt2 = mx.optimizer.SGD @@ -209,7 +204,6 @@ def test_std_sparse_sgd(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_nag(): opt1 = mx.optimizer.NAG opt2 = mx.optimizer.NAG @@ -235,7 +229,6 @@ def test_nag(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_lars(): opt1 = mx.optimizer.LARS opt2 = mx.optimizer.LARS @@ -261,7 +254,6 @@ def test_lars(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_lamb(): opt1 = mx.optimizer.LAMB opt2 = mx.optimizer.LAMB @@ -292,7 +284,6 @@ def test_lamb(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_lans(): opt1 = mx.optimizer.LANS opt2 = mx.optimizer.LANS @@ -321,7 +312,6 @@ def test_lans(): shapes, dtype, rtol=1e-3, atol=1e-3) -@with_seed() def test_sgld(): opt1 = mx.optimizer.SGLD opt2 = mx.optimizer.SGLD @@ -348,7 +338,6 @@ def test_sgld(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_ftml(): opt1 = mx.optimizer.FTML opt2 = mx.optimizer.FTML @@ -449,7 +438,6 @@ def step(self, indices, weights, grads, states): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_adam(): opt1 = mx.optimizer.Adam opt2 = mx.optimizer.Adam @@ -476,7 +464,6 @@ def test_adam(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sparse_adam(): opt1 = PySparseAdam opt2 = mx.optimizer.Adam @@ -520,7 +507,6 @@ def test_sparse_adam(): @xfail_when_nonstandard_decimal_separator -@with_seed() @pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/18400") def test_adamax(): opt1 = mx.optimizer.Adamax @@ -545,7 +531,6 @@ def test_adamax(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_signum(): opt1 = mx.optimizer.Signum opt2 = mx.optimizer.Signum @@ -574,7 +559,6 @@ def test_signum(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_rms(): opt1 = mx.optimizer.RMSProp opt2 = mx.optimizer.RMSProp @@ -686,7 +670,6 @@ def step(self, indices, weights, grads, states): @xfail_when_nonstandard_decimal_separator -@with_seed() @retry(3) def test_ftrl(): opt1 = mx.optimizer.Ftrl @@ -714,7 +697,6 @@ def test_ftrl(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sparse_ftrl(): opt1 = PySparseFtrl opt2 = mx.optimizer.Ftrl @@ -741,7 +723,6 @@ def test_sparse_ftrl(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_nadam(): opt1 = mx.optimizer.Nadam opt2 = mx.optimizer.Nadam @@ -833,7 +814,6 @@ def step(self, indices, weights, grads, 
states): weight[row] -= lr * grad[row] / denom -@with_seed() def test_adagrad(): opt1 = mx.optimizer.AdaGrad opt2 = mx.optimizer.AdaGrad @@ -855,7 +835,6 @@ def test_adagrad(): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sparse_adagrad(): opt1 = PySparseAdaGrad opt2 = mx.optimizer.AdaGrad @@ -879,7 +858,6 @@ def test_sparse_adagrad(): g_stype='row_sparse') -@with_seed() def test_adadelta(): opt1 = mx.optimizer.AdaDelta opt2 = mx.optimizer.AdaDelta @@ -900,7 +878,6 @@ def test_adadelta(): compare_optimizer(opt1(**kwarg), opt2(**kwarg), shapes, dtype) -@with_seed() def test_dcasgd(): opt1 = mx.optimizer.DCASGD opt2 = mx.optimizer.DCASGD diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py index 273a86978a38..a260f6399a47 100644 --- a/tests/python/unittest/test_random.py +++ b/tests/python/unittest/test_random.py @@ -22,7 +22,7 @@ from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_almost_equal import numpy as np import random as rnd -from common import with_seed, retry, random_seed +from common import retry, random_seed import scipy.stats as ss import unittest import pytest @@ -367,7 +367,7 @@ def check_with_device(device, dtype): grad_nodes = ['v1', 'v2'] if symbdic['discrete'] else ['v0', 'v1', 'v2'] check_numeric_gradient(test_pdf, [un1, p1, p2], grad_nodes=grad_nodes, atol=backw_atol, rtol=backw_rtol, dtype=dtype) -@with_seed(1000) +@pytest.mark.seed(1000) @pytest.mark.serial def test_dirichlet(): num_classes = 2 @@ -414,7 +414,6 @@ def set_seed_variously(init_seed, num_init_seeds, final_seed): return end_seed # Tests that seed setting of std (non-parallel) rng is synchronous w.r.t. rng use before and after. -@with_seed() @pytest.mark.serial def test_random_seed_setting(): ctx = mx.context.current_context() @@ -437,7 +436,6 @@ def test_random_seed_setting(): # Tests that seed setting of parallel rng is synchronous w.r.t. rng use before and after. -@with_seed() @pytest.mark.serial def test_parallel_random_seed_setting(): ctx = mx.context.current_context() @@ -488,7 +486,6 @@ def set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed): return end_seed # Tests that seed setting of std (non-parallel) rng for specific context is synchronous w.r.t. rng use before and after. -@with_seed() @pytest.mark.serial def test_random_seed_setting_for_context(): seed_to_test = 1234 @@ -523,7 +520,6 @@ def test_random_seed_setting_for_context(): assert same(samples_sym[i - 1], samples_sym[i]) # Tests that seed setting of parallel rng for specific context is synchronous w.r.t. rng use before and after. 
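# The seed-setting tests in this file all rely on the same contract: seeding
# is synchronous with respect to later draws, so an identical seed must
# produce an identical stream. A self-contained illustration of that
# contract (an expository sketch, not one of the tests in this patch):
import mxnet as mx

mx.random.seed(1234)
a = mx.nd.random.uniform(shape=(4,)).asnumpy()
mx.random.seed(1234)
b = mx.nd.random.uniform(shape=(4,)).asnumpy()
assert (a == b).all()  # identical seed, identical draws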
-@with_seed() @pytest.mark.serial def test_parallel_random_seed_setting_for_context(): seed_to_test = 1234 @@ -565,7 +561,6 @@ def test_parallel_random_seed_setting_for_context(): for i in range(1, len(samples_sym)): assert same(samples_sym[i - 1], samples_sym[i]) -@with_seed() @pytest.mark.parametrize('dtype', ['uint8', 'int32', 'float16', 'float32', 'float64']) @pytest.mark.parametrize('x', [[[0,1,2,3,4],[4,3,2,1,0]], [0,1,2,3,4]]) @pytest.mark.serial @@ -601,7 +596,6 @@ def test_sample_multinomial(dtype, x): assert_almost_equal(real_dx, dx[i, :], rtol=1e-4, atol=1e-5) # Test the generators with the chi-square testing -@with_seed() @pytest.mark.serial def test_normal_generator(): ctx = mx.context.current_context() @@ -626,7 +620,6 @@ def test_normal_generator(): verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials) -@with_seed() @pytest.mark.serial def test_uniform_generator(): ctx = mx.context.current_context() @@ -645,7 +638,6 @@ def test_uniform_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs) -@with_seed() @pytest.mark.serial def test_gamma_generator(): success_rate = 0.05 @@ -661,7 +653,6 @@ def test_gamma_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=success_rate) -@with_seed() @pytest.mark.serial def test_exponential_generator(): ctx = mx.context.current_context() @@ -676,7 +667,6 @@ def test_exponential_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=0.20) -@with_seed() @pytest.mark.serial def test_poisson_generator(): ctx = mx.context.current_context() @@ -692,7 +682,6 @@ def test_poisson_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs) -@with_seed() @pytest.mark.serial def test_negative_binomial_generator(): ctx = mx.context.current_context() @@ -722,7 +711,6 @@ def test_negative_binomial_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs) -@with_seed() @pytest.mark.serial def test_multinomial_generator(): # This test fails with dtype float16 if the probabilities themselves cannot be @@ -767,7 +755,6 @@ def quantize_probs(probs, dtype): nsamples=samples, nrepeat=trials, success_rate=0.20) -@with_seed() @pytest.mark.serial def test_with_random_seed(): ctx = mx.context.current_context() @@ -829,7 +816,6 @@ def check_data(a, b): for j in range(i+1, num_seeds): check_data(data[i],data[j]) -@with_seed() @pytest.mark.serial def test_random_seed(): shape = (5, 5) @@ -856,7 +842,6 @@ def _assert_same_mx_arrays(a, b): except NameError: pass -@with_seed() @pytest.mark.serial def test_unique_zipfian_generator(): ctx = mx.context.current_context() @@ -874,7 +859,6 @@ def test_unique_zipfian_generator(): assert num_trial > 14500 assert num_trial < 17000 -@with_seed() @pytest.mark.serial def test_zipfian_generator(): # dummy true classes @@ -907,7 +891,6 @@ def compute_expected_prob(): assert_almost_equal(exp_cnt_true, exp_cnt[true_classes], rtol=1e-1, atol=1e-2) # Issue #10277 (https://github.com/apache/incubator-mxnet/issues/10277) discusses this test. 
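# test_shuffle below checks that mx.nd.random.shuffle permutes along the
# first axis only, so every row of the output is a row of the input. A tiny
# standalone sketch of that invariant (the real test is far more thorough):
import mxnet as mx

x = mx.nd.arange(12).reshape((4, 3))
y = mx.nd.random.shuffle(x)
rows = {tuple(r) for r in x.asnumpy().tolist()}  # rows of the original
assert all(tuple(r) in rows for r in y.asnumpy().tolist())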
-@with_seed() @pytest.mark.serial def test_shuffle(): def check_first_axis_shuffle(arr): @@ -987,7 +970,6 @@ def testLarge(data, repeat): testLarge(mx.nd.arange(0, 100000), 10) -@with_seed() @pytest.mark.serial def test_randint(): dtypes = ['int32', 'int64'] @@ -1006,13 +988,11 @@ def test_randint(): assert same(ret1, ret2), \ "ndarray test: `%s` should give the same result with the same seed" -@with_seed() @pytest.mark.serial def test_randint_extremes(): a = mx.nd.random.randint(dtype='int64', low=50000000, high=50000010, ctx=mx.context.current_context()) assert a>=50000000 and a<=50000010 -@with_seed() @pytest.mark.serial def test_randint_generator(): ctx = mx.context.current_context() @@ -1033,14 +1013,12 @@ def test_randint_generator(): for _ in range(10)]) verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, nrepeat=100) -@with_seed() @pytest.mark.serial def test_randint_without_dtype(): a = mx.nd.random.randint(low=50000000, high=50000010, ctx=mx.context.current_context()) assert a.dtype == np.int32 -@with_seed() @pytest.mark.serial def test_sample_multinomial_num_outputs(): ctx = mx.context.current_context() diff --git a/tests/python/unittest/test_recordio.py b/tests/python/unittest/test_recordio.py index 87ffe01dabd3..669f024aa4b8 100644 --- a/tests/python/unittest/test_recordio.py +++ b/tests/python/unittest/test_recordio.py @@ -21,9 +21,7 @@ import numpy as np import random import string -from common import with_seed -@with_seed() def test_recordio(tmpdir): frec = tmpdir.join('rec') N = 255 @@ -38,7 +36,6 @@ def test_recordio(tmpdir): res = reader.read() assert res == bytes(str(chr(i)), 'utf-8') -@with_seed() def test_indexed_recordio(tmpdir): fidx = tmpdir.join('idx') frec = tmpdir.join('rec') @@ -57,7 +54,6 @@ def test_indexed_recordio(tmpdir): res = reader.read_idx(i) assert res == bytes(str(chr(i)), 'utf-8') -@with_seed() def test_recordio_pack_label(): N = 255 diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py index 5aa068423df1..1f4ac20ab746 100644 --- a/tests/python/unittest/test_sparse_ndarray.py +++ b/tests/python/unittest/test_sparse_ndarray.py @@ -20,7 +20,7 @@ from mxnet.ndarray import NDArray import mxnet as mx from mxnet.test_utils import * -from common import with_seed, random_seed +from common import random_seed from mxnet.base import mx_real_t from numpy.testing import assert_allclose import numpy.random as rnd @@ -33,7 +33,6 @@ def sparse_nd_ones(shape, stype): return mx.nd.ones(shape).tostype(stype) -@with_seed() def test_sparse_nd_elemwise_add(): def check_sparse_nd_elemwise_binary(shapes, stypes, f, g): # generate inputs @@ -59,7 +58,6 @@ def check_sparse_nd_elemwise_binary(shapes, stypes, f, g): check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g) -@with_seed() def test_sparse_nd_copy(): def check_sparse_nd_copy(from_stype, to_stype, shape): from_nd = rand_ndarray(shape, from_stype) @@ -81,7 +79,6 @@ def check_sparse_nd_copy(from_stype, to_stype, shape): check_sparse_nd_copy('row_sparse', 'default', shape_3d) check_sparse_nd_copy('default', 'row_sparse', shape_3d) -@with_seed() def test_sparse_nd_basic(): def check_sparse_nd_basic_rsp(): storage_type = 'row_sparse' @@ -94,7 +91,6 @@ def check_sparse_nd_basic_rsp(): check_sparse_nd_basic_rsp() -@with_seed() def test_sparse_nd_setitem(): def check_sparse_nd_setitem(stype, shape, dst): x = mx.nd.zeros(shape=shape, stype=stype) @@ -112,7 +108,6 @@ def check_sparse_nd_setitem(stype, shape, dst): # scalar 
assigned to row_sparse NDArray check_sparse_nd_setitem('row_sparse', shape, 2) -@with_seed() def test_sparse_nd_slice(): shape = (rnd.randint(2, 10), rnd.randint(2, 10)) stype = 'csr' @@ -156,7 +151,6 @@ def check_slice_nd_csr_fallback(shape): check_slice_nd_csr_fallback(shape) -@with_seed() def test_sparse_nd_concat(): def check_concat(arrays): ret = np.concatenate([arr.asnumpy() for arr in arrays], axis=0) @@ -173,7 +167,6 @@ def check_concat(arrays): check_concat(zero_nds) -@with_seed() def test_sparse_nd_equal(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -189,7 +182,6 @@ def test_sparse_nd_equal(): assert z.stype == stype -@with_seed() def test_sparse_nd_not_equal(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -205,7 +197,6 @@ def test_sparse_nd_not_equal(): assert z.stype == 'default' -@with_seed() def test_sparse_nd_greater(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -224,7 +215,6 @@ def test_sparse_nd_greater(): assert z.stype == stype -@with_seed() def test_sparse_nd_greater_equal(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -243,7 +233,6 @@ def test_sparse_nd_greater_equal(): assert z.stype == stype -@with_seed() def test_sparse_nd_lesser(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -262,7 +251,6 @@ def test_sparse_nd_lesser(): assert z.stype == 'default' -@with_seed() def test_sparse_nd_lesser_equal(): for stype in ['row_sparse', 'csr']: shape = rand_shape_2d() @@ -281,7 +269,6 @@ def test_sparse_nd_lesser_equal(): assert z.stype == stype -@with_seed() def test_sparse_nd_binary(): N = 3 def check_binary(fn, stype): @@ -320,7 +307,6 @@ def check_binary(fn, stype): @xfail_when_nonstandard_decimal_separator -@with_seed() def test_sparse_nd_binary_scalar_op(): N = 3 def check(fn, stype, out_stype=None): @@ -351,7 +337,6 @@ def check(fn, stype, out_stype=None): check(lambda x: x - 0, stype, out_stype=stype) -@with_seed() def test_sparse_nd_binary_iop(): N = 3 def check_binary(fn, stype): @@ -381,7 +366,6 @@ def inplace_mul(x, y): check_binary(fn, stype) -@with_seed() def test_sparse_nd_negate(): def check_sparse_nd_negate(shape, stype): npy = np.random.uniform(-10, 10, rand_shape_2d()) @@ -400,7 +384,6 @@ def check_sparse_nd_negate(shape, stype): check_sparse_nd_negate(shape, stype) -@with_seed() def test_sparse_nd_broadcast(): sample_num = 1000 # TODO(haibin) test with more than 2 dimensions @@ -449,7 +432,6 @@ def test_broadcast_like(stype): test_broadcast_like(stype) -@with_seed() def test_sparse_nd_transpose(): npy = np.random.uniform(-10, 10, rand_shape_2d()) stypes = ['csr', 'row_sparse'] @@ -458,7 +440,6 @@ def test_sparse_nd_transpose(): assert_almost_equal(npy.T, (nd.T).asnumpy()) -@with_seed() def test_sparse_nd_storage_fallback(): def check_output_fallback(shape): ones = mx.nd.ones(shape) @@ -482,7 +463,6 @@ def check_fallback_with_temp_resource(shape): check_fallback_with_temp_resource(shape) -@with_seed() def test_sparse_nd_random(): """ test sparse random operator on cpu """ # gpu random operator doesn't use fixed seed @@ -500,7 +480,6 @@ def test_sparse_nd_random(): assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy()) -@with_seed() def test_sparse_nd_astype(): stypes = ['row_sparse', 'csr'] for stype in stypes: @@ -509,7 +488,6 @@ def test_sparse_nd_astype(): assert(y.dtype == np.int32), y.dtype -@with_seed() def test_sparse_nd_astype_copy(): stypes = ['row_sparse', 'csr'] for stype in stypes: @@ -540,7 +518,6 @@ def test_sparse_nd_astype_copy(): assert (id(x) == 
id(y)) -@with_seed() def test_sparse_nd_pickle(): dim0 = 40 dim1 = 40 @@ -560,7 +537,6 @@ def test_sparse_nd_pickle(): # @kalyc: Getting rid of fixed seed as flakiness could not be reproduced # tracked at https://github.com/apache/incubator-mxnet/issues/11741 -@with_seed() def test_sparse_nd_save_load(): repeat = 1 stypes = ['default', 'row_sparse', 'csr'] @@ -593,7 +569,6 @@ def test_sparse_nd_save_load(): os.remove(fname) -@with_seed() def test_sparse_nd_unsupported(): nd = mx.nd.zeros((2,2), stype='row_sparse') fn_slice = lambda x: x._slice(None, None) @@ -608,7 +583,6 @@ def test_sparse_nd_unsupported(): pass -@with_seed() def test_create_csr(): def check_create_csr_from_nd(shape, density, dtype): matrix = rand_ndarray(shape, 'csr', density) @@ -686,7 +660,6 @@ def assert_csr_almost_equal(nd, sp): check_create_csr_from_scipy(shape, density, mx.nd.array) -@with_seed() def test_create_row_sparse(): dim0 = 50 dim1 = 50 @@ -715,7 +688,6 @@ def test_create_row_sparse(): -@with_seed() def test_create_sparse_nd_infer_shape(): def check_create_csr_infer_shape(shape, density, dtype): try: @@ -759,7 +731,6 @@ def check_create_rsp_infer_shape(shape, density, dtype): check_create_rsp_infer_shape(shape_3d, density, dtype) -@with_seed() def test_create_sparse_nd_from_dense(): def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx): arr = f(dense_arr, dtype=dtype, ctx=ctx) @@ -782,7 +753,6 @@ def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx): else np.float32 check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx) -@with_seed() def test_create_sparse_nd_from_sparse(): def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx): arr = f(sp_arr, dtype=dtype, ctx=ctx) @@ -815,7 +785,6 @@ def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx): check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx) -@with_seed() def test_create_sparse_nd_empty(): def check_empty(shape, stype): arr = mx.nd.empty(shape, stype=stype) @@ -856,7 +825,6 @@ def check_rsp_empty(shape, dtype, ctx): check_rsp_empty(shape_3d, dtype, ctx) -@with_seed() def test_synthetic_dataset_generator(): def test_powerlaw_generator(csr_arr, final_row=1): """Test power law distribution @@ -890,7 +858,6 @@ def test_powerlaw_generator(csr_arr, final_row=1): test_powerlaw_generator(csr_arr_square, final_row=6) -@with_seed() def test_sparse_nd_fluent(): def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False): with mx.name.NameManager(): @@ -923,7 +890,6 @@ def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False): check_fluent_regular('csr', func, {'axis': 0}) -@with_seed() def test_sparse_nd_exception(): """ test invalid sparse operator will throw a exception """ a = mx.nd.ones((2,2)) @@ -933,7 +899,6 @@ def test_sparse_nd_exception(): assertRaises(ValueError, mx.nd.sparse.row_sparse_array, (2,2), shape=(3,2)) assertRaises(ValueError, mx.nd.sparse.zeros, "invalid_stype", (2,2)) -@with_seed() def test_sparse_nd_check_format(): """ test check_format for sparse ndarray """ shape = rand_shape_2d() @@ -985,7 +950,6 @@ def test_sparse_nd_check_format(): a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape) assertRaises(mx.base.MXNetError, a.check_format) -@with_seed() def test_sparse_nd_norm(): def check_sparse_nd_norm(stype, shape, density, **kwargs): data, _ = rand_sparse_ndarray(shape, stype, density) @@ -1004,7 +968,6 @@ def check_sparse_nd_norm(stype, shape, density, **kwargs): check_sparse_nd_norm(stype, 
shape, density, axis=0, keepdims=False, ord=2) check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=True, ord=2) -@with_seed() def test_sparse_fc(): def check_sparse_fc(batch_size, dim_in, dim_out, stype): data = rand_ndarray((batch_size, dim_in), stype, density=0.5) @@ -1021,7 +984,6 @@ def check_sparse_fc(batch_size, dim_in, dim_out, stype): # test FC with row_sparse weight w/ density=1, csr data (fallback) check_sparse_fc(5, 10, 8, 'csr') -@with_seed() def test_sparse_take(): def check_sparse_take(density, mode): data_shape = rand_shape_2d() @@ -1040,7 +1002,6 @@ def check_sparse_take(density, mode): for m in modes: check_sparse_take(d, m) -@with_seed() def test_sparse_getnnz(): if default_context().device_type is 'gpu': return diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py index 4235c421ac2d..0f26df68a055 100644 --- a/tests/python/unittest/test_sparse_operator.py +++ b/tests/python/unittest/test_sparse_operator.py @@ -18,7 +18,7 @@ from mxnet.test_utils import * from mxnet.base import MXNetError import pytest -from common import with_seed, assertRaises +from common import assertRaises import random import warnings @@ -154,7 +154,6 @@ def gen_rsp_random_indices(shape, density=.5, force_indices=None): def all_zero(var): return 0 -@with_seed() @pytest.mark.skip(reason="https://github.com/apache/incubator-mxnet/issues/18740") def test_elemwise_binary_ops(): # skip testing on GPU because only CPU ops are implemented @@ -521,7 +520,6 @@ def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape, ograd_density=ograd_density) -@with_seed() def test_elemwise_csr_same_zeros(): # Zeroes a = mx.nd.sparse.zeros('csr', (1,1)) @@ -696,7 +694,6 @@ def check_sparse_mathematical_core(name, stype, assert_almost_equal(arr_grad, input_grad, equal_nan=True) -@with_seed() @pytest.mark.serial def test_sparse_mathematical_core(): def util_sign(a): @@ -1162,7 +1159,6 @@ def check_mathematical_core(stype, output_grad_stype=None, -@with_seed() @pytest.mark.serial def test_elemwise_add_ex(): def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_grad_stype=None): @@ -1193,7 +1189,6 @@ def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_ lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse') -@with_seed() @pytest.mark.serial def test_cast_storage_ex(): def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad=True): @@ -1249,7 +1244,6 @@ def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad= check_numeric_grad=False) -@with_seed() @pytest.mark.serial def test_sparse_dot(): def test_infer_forward_stype(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_a, trans_b): @@ -1380,7 +1374,6 @@ def test_sparse_dot_zero_output(lhs_shape, trans_lhs, rhs_num_cols): test_sparse_dot_zero_output(rand_shape_2d(50, 200), False, 40) test_sparse_dot_zero_output(rand_shape_2d(50, 200), True, 40) -@with_seed() @pytest.mark.serial def test_sparse_dot_determinism(): def check_dot_determinism(lhs_stype, rhs_stype, lhs_density, rhs_density, transpose_a, transpose_b, forward_stype): @@ -1410,7 +1403,6 @@ def check_dot_determinism(lhs_stype, rhs_stype, lhs_density, rhs_density, transp check_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'default') -@with_seed() def test_sparse_slice(): def check_csr_slice(shape, slice_input): storage_type = 'csr' @@ -1426,7 +1418,6 @@ def check_csr_slice(shape, slice_input): check_csr_slice(shape, False) -@with_seed() 
@pytest.mark.serial def test_sparse_retain(): def check_sparse_retain(shape, density, index_type=np.int64): @@ -1460,7 +1451,6 @@ def check_sparse_retain(shape, density, index_type=np.int64): check_sparse_retain(shape_3d, density, itype) -@with_seed() def test_sparse_unary_with_numerics(): def check_sparse_simple(name, stype, mxnet_func, forward_numpy_call, backward_numpy_call, output_grad_stype=None, @@ -1538,7 +1528,6 @@ def check_sparse_function(name, mxnet_func, forward_numpy_call, backward_numpy_c backward_is_use_output=True) -@with_seed() @pytest.mark.serial def test_sparse_nd_zeros(): def check_sparse_nd_zeros(stype, shape): @@ -1552,7 +1541,6 @@ def check_sparse_nd_zeros(stype, shape): check_sparse_nd_zeros('default', shape) -@with_seed() @pytest.mark.serial def test_sparse_nd_zeros_like(): def check_sparse_nd_zeros_like(stype, shape): @@ -1565,7 +1553,6 @@ def check_sparse_nd_zeros_like(stype, shape): check_sparse_nd_zeros_like('csr', shape) -@with_seed() @pytest.mark.serial def test_sparse_axis_operations(): def test_variations(func_name): @@ -1597,7 +1584,6 @@ def test_fallback(func_name, axis=0, keepdims=True, exclude=True): test_fallback(mx.nd.mean, axis=0, keepdims=True, exclude=True) -@with_seed() @pytest.mark.serial def test_sparse_square_sum(): dim0 = 30 @@ -1658,7 +1644,6 @@ def test_sparse_square_sum(): atol=1e-2, rtol=0.1) -@with_seed() @pytest.mark.serial def test_sparse_storage_fallback(): """ test operators which don't implement FComputeEx or FStatefulComputeEx """ @@ -1709,7 +1694,6 @@ def check_operator_with_temp_resource(shape, stype): check_concat(shape, lhs, rhs) -@with_seed() @pytest.mark.serial def test_sparse_elementwise_sum(): def check_sparse_elementwise_sum_with_shape(stypes, shape, n): @@ -1754,7 +1738,6 @@ def check_sparse_elementwise_sum_with_shape(stypes, shape, n): check_sparse_elementwise_sum_with_shape(stypes, shape, test_len+1) -@with_seed() @pytest.mark.serial def test_sparse_embedding(): ''' test sparse embedding operator ''' @@ -1805,7 +1788,6 @@ def check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad): for sparse_grad in sparse_grads: check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad) -@with_seed() def test_sparse_broadcast_add_sub(): def check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype): assert_almost_equal(mx.nd.sparse.add(mx_lhs, mx_rhs).asnumpy(), np.add(np_lhs, np_rhs), atol=1e-4) @@ -1830,7 +1812,6 @@ def check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype): check_broadcast_add(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32) check_broadcast_sub(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32) -@with_seed() def test_sparse_broadcast_mul_div(): def check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype): assert_almost_equal(mx.nd.sparse.multiply(mx_lhs, mx_rhs).asnumpy(), np.multiply(np_lhs, np_rhs), atol=1e-4) @@ -1853,7 +1834,6 @@ def check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype): check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32) check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32) -@with_seed() def test_batchnorm_fallback(): # same test as test_operator.test_batchnorm_training, but tests fallback logic of batchnorm stype = 'row_sparse' @@ -1924,7 +1904,6 @@ def test_batchnorm_fallback(): check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-3, rtol=0.2, atol=0.01) -@with_seed() @pytest.mark.serial def test_mkldnn_sparse(): # This test is trying to create a race condition describedd in @@ -1941,7 +1920,6 @@ def test_mkldnn_sparse(): 
print(res1 - fc_res.asnumpy()) almost_equal(res1, fc_res.asnumpy()) -@with_seed() @pytest.mark.serial def test_sparse_nd_where(): def get_forward_expected_output(condition, x, y): @@ -2039,7 +2017,6 @@ def test_where_numeric_gradient(shape): test_where_helper((5, 9)) test_where_numeric_gradient((5, 9)) -@with_seed() @pytest.mark.serial def test_sparse_quadratic_function(): def f(x, a, b, c): diff --git a/tests/python/unittest/test_subgraph.py b/tests/python/unittest/test_subgraph.py index b956cf8b40a4..947d18e2d845 100644 --- a/tests/python/unittest/test_subgraph.py +++ b/tests/python/unittest/test_subgraph.py @@ -22,14 +22,12 @@ import copy from mxnet.test_utils import * import pytest -from common import with_seed from mxnet.gluon.model_zoo.vision import get_model def make_subgraph(subg, *args): js = subg.tojson() return subg -@with_seed() @pytest.mark.serial def test_make_subgraph(): def make_subgraph1(stype): diff --git a/tests/python/unittest/test_tvm_op.py b/tests/python/unittest/test_tvm_op.py index 55bb7cc2bd92..6c1d8c58dd8d 100644 --- a/tests/python/unittest/test_tvm_op.py +++ b/tests/python/unittest/test_tvm_op.py @@ -19,11 +19,9 @@ import numpy as _np from mxnet.test_utils import same, rand_shape_nd from mxnet.runtime import Features -from common import with_seed _features = Features() -@with_seed() def test_tvm_broadcast_add(): if _features.is_enabled("TVM_OP"): configs = [