This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-788] Fix for issue #11733 #12067

Merged: 5 commits, Aug 12, 2018
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
@@ -176,3 +176,4 @@ List of Contributors
* [Kou Ding](https://github.com/chinakook)
* [Istvan Fehervari](https://github.com/ifeherva)
* [Aaron Markham](https://github.com/aaronmarkham)
+* [Sam Skalicky](https://github.com/samskalicky)
17 changes: 11 additions & 6 deletions python/mxnet/test_utils.py
@@ -479,10 +479,8 @@ def assert_almost_equal(a, b, rtol=None, atol=None, names=('a', 'b'), equal_nan=
"""
rtol = get_rtol(rtol)
atol = get_atol(atol)
-
if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):
return
-
index, rel = find_max_violation(a, b, rtol, atol)
np.set_printoptions(threshold=4, suppress=True)
msg = npt.build_err_msg([a, b],
@@ -1203,10 +1201,10 @@ def check_speed(sym, location=None, ctx=None, N=20, grad_req=None, typ="whole",
else:
raise ValueError('typ can only be "whole" or "forward".')

-
def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
arg_params=None, aux_params=None, tol=None,
-raise_on_err=True, ground_truth=None, equal_nan=False, use_uniform=False):
+raise_on_err=True, ground_truth=None, equal_nan=False,
+use_uniform=False, rand_type=np.float64):
"""Check symbol gives the same output for different running context

Parameters
@@ -1223,6 +1221,11 @@ def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
Optional, When flag set to true,
random input data generated follows uniform distribution,
not normal distribution
+rand_type: np.dtype
+casts the randomly generated data to this type
+Optional, when input data is passed via arg_params,
+defaults to np.float64 (numpy float default)
+
Examples
--------
>>> # create the symbol
@@ -1283,9 +1286,11 @@ def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
for n, arr in exe_list[0].arg_dict.items():
if n not in arg_params:
if use_uniform:
-arg_params[n] = np.random.uniform(low=-0.92, high=0.92, size=arr.shape)
+arg_params[n] = np.random.uniform(low=-0.92, high=0.92,
+size=arr.shape).astype(rand_type)
else:
-arg_params[n] = np.random.normal(size=arr.shape, scale=scale)
+arg_params[n] = np.random.normal(size=arr.shape,
+scale=scale).astype(rand_type)
for n, arr in exe_list[0].aux_dict.items():
if n not in aux_params:
aux_params[n] = 0
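For context, a minimal sketch of the generate-then-cast behavior the new `rand_type` argument adds. The `make_input` helper below is an illustration, not MXNet API; only the distributions, bounds, and the `.astype(rand_type)` cast come from the diff above.

```python
import numpy as np

# Hypothetical helper mirroring the data-generation logic patched above:
# inputs are drawn in float64 and then cast to rand_type, so every context
# in ctx_list starts from values that are exactly representable in the
# narrowest dtype under test.
def make_input(shape, scale=1.0, use_uniform=False, rand_type=np.float64):
    if use_uniform:
        data = np.random.uniform(low=-0.92, high=0.92, size=shape)
    else:
        data = np.random.normal(size=shape, scale=scale)
    return data.astype(rand_type)

# Drawing in float16 yields values that survive float16 -> float32 -> float64
# round-trips unchanged, which the updated pooling tests rely on.
x = make_input((2, 2, 10, 10), rand_type=np.float16)
```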
3 changes: 1 addition & 2 deletions src/operator/nn/pooling.cc
@@ -377,8 +377,7 @@ We can see that Lp pooling stands between those two, in practice the most common

For each window ``X``, the mathematical expression for Lp pooling is:

-..math::
-f(X) = \sqrt{p}{\sum\limits_{x \in X} x^p}
+:math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}`
Contributor: I am not an expert in this. Someone familiar with it may want to check the math here since this change is not mentioned in the PR description.

Contributor: This change is mentioned in the description in the changes section.

Contributor: Thanks

)code" ADD_FILELINE)
.set_num_inputs(1)
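As a sanity check on the formula under review, here is a small numpy sketch of Lp pooling over a single window, computing f(X) = (sum of x^p over x in X)^(1/p). The `lp_pool_window` helper is illustrative only, not MXNet code.

```python
import numpy as np

def lp_pool_window(X, p):
    """Lp pooling of one window: the p-norm of its elements.
    p=1 gives sum pooling, and large p approaches max pooling,
    matching the 'stands between those two' remark in the docs."""
    X = np.asarray(X, dtype=np.float64)
    return np.sum(np.abs(X) ** p) ** (1.0 / p)

window = np.array([[1.0, 2.0],
                   [3.0, 4.0]])
print(lp_pool_window(window, p=1))    # 10.0, plain sum
print(lp_pool_window(window, p=2))    # ~5.48, Euclidean norm
print(lp_pool_window(window, p=100))  # ~4.0, approaches the max
```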
21 changes: 8 additions & 13 deletions tests/python/gpu/test_operator_gpu.py
@@ -614,13 +614,13 @@ def test_pooling_with_type():
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
-check_consistency(sym, ctx_list)
+check_consistency(sym, ctx_list, rand_type=np.float16)

sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
-check_consistency(sym, ctx_list)
+check_consistency(sym, ctx_list, rand_type=np.float16)

sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
-check_consistency(sym, ctx_list)
+check_consistency(sym, ctx_list, rand_type=np.float16)


@with_seed()
@@ -765,31 +765,26 @@ def test_spatial_transformer_with_type():
check_consistency(sym, ctx_list, grad_req="add")


-# Checking max pooling consistency over the data sets of different float types is problematic
-# as one max value in a float32 data set may not be the max value in a float16 data set.
-# This function will not be called.
-@with_seed(1234)
-def test_pooling_with_type():
+@with_seed()
+def test_pooling_with_type2():
Contributor: Why rename the method, since there is no test_pooling_with_type()?

Contributor: There is another test_pooling_with_type if you do a bit of searching in the file.

Contributor: Sorry, I missed it, because this change shows as an update on the previous method. It's also more informative to give a descriptive method name rather than a numerical suffix.
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]

sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
-check_consistency(sym, ctx_list)
+check_consistency(sym, ctx_list, rand_type=np.float16)

sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)

-# this is unstable
-# sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
-# check_consistency(sym, ctx_list)
+sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
+check_consistency(sym, ctx_list, rand_type=np.float16)

sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/11517")
@with_seed()
def test_pooling_versions():
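The comment deleted above explains the original flakiness: rounding can merge two distinct values, so the largest element of a float32 array need not sit at the same position in its float16 copy, and max pooling outputs and gradients may then legitimately disagree across contexts. Below is a small numpy sketch of why generating the data in float16 (what `rand_type=np.float16` does) removes that ambiguity; this is an illustration, not test code from the PR.

```python
import numpy as np

np.random.seed(42)

# Failure mode: two distinct float64 values can round to the same float16
# value, so the argmax of the float16 copy can land on a different element
# than the argmax of the float64 original.
a = np.array([1.0001, 1.0002, 0.5])  # distinct in float64...
a16 = a.astype(np.float16)
print(np.argmax(a), np.argmax(a16))  # 1 vs 0: both round to float16 1.0

# The fix: draw the data in float16 first. Casting float16 up to float32 or
# float64 is exact, so every copy holds identical values and the max location
# (hence the pooling output and its gradient) agrees across dtypes.
b16 = np.random.normal(size=1000).astype(np.float16)
b32, b64 = b16.astype(np.float32), b16.astype(np.float64)
assert np.argmax(b16) == np.argmax(b32) == np.argmax(b64)
```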