From 7e1c6a00a8929fed38ce3bded0c772494bc4ebe5 Mon Sep 17 00:00:00 2001
From: Gabriel de Marmiesse
Date: Mon, 24 Feb 2020 02:20:48 +0100
Subject: [PATCH] Add python implementation of softshrink (#1140)

* Add softshrink python op

* Added check.
---
 tensorflow_addons/activations/softshrink.py  | 16 ++++++++++++
 .../activations/softshrink_test.py           | 25 +++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/tensorflow_addons/activations/softshrink.py b/tensorflow_addons/activations/softshrink.py
index a93faf1a98..238cc19036 100644
--- a/tensorflow_addons/activations/softshrink.py
+++ b/tensorflow_addons/activations/softshrink.py
@@ -48,3 +48,19 @@ def _softshrink_grad(op, grad):
     return _activation_so.ops.addons_softshrink_grad(
         grad, op.inputs[0], op.get_attr("lower"), op.get_attr("upper")
     )
+
+
+def _softshrink_py(x, lower, upper):
+    if lower > upper:
+        raise ValueError(
+            "The value of lower is {} and should"
+            " not be higher than the value of"
+            " upper, which is {}.".format(lower, upper)
+        )
+    mask_lower = x < lower
+    mask_upper = upper < x
+    mask_middle = tf.logical_not(tf.logical_or(mask_lower, mask_upper))
+    mask_lower = tf.cast(mask_lower, x.dtype)
+    mask_upper = tf.cast(mask_upper, x.dtype)
+    mask_middle = tf.cast(mask_middle, x.dtype)
+    return x * (1 - mask_middle) - mask_lower * lower - mask_upper * upper
diff --git a/tensorflow_addons/activations/softshrink_test.py b/tensorflow_addons/activations/softshrink_test.py
index 7aa487f6eb..5338726026 100644
--- a/tensorflow_addons/activations/softshrink_test.py
+++ b/tensorflow_addons/activations/softshrink_test.py
@@ -18,6 +18,7 @@
 import numpy as np
 import tensorflow as tf
 from tensorflow_addons.activations import softshrink
+from tensorflow_addons.activations.softshrink import _softshrink_py
 from tensorflow_addons.utils import test_utils
 
 
@@ -53,6 +54,30 @@ def test_theoretical_gradients(self, dtype):
         theoretical, numerical = tf.test.compute_gradient(softshrink, [x])
         self.assertAllCloseAccordingToType(theoretical, numerical, atol=1e-4)
 
+    @parameterized.named_parameters(("float32", np.float32), ("float64", np.float64))
+    def test_same_as_py_func(self, dtype):
+        np.random.seed(1234)
+        for _ in range(20):
+            self.verify_funcs_are_equivalent(dtype)
+
+    def verify_funcs_are_equivalent(self, dtype):
+        x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
+        x = tf.convert_to_tensor(x_np)
+        lower = np.random.uniform(-10, 10)
+        upper = lower + np.random.uniform(0, 10)
+
+        with tf.GradientTape(persistent=True) as t:
+            t.watch(x)
+            y_native = softshrink(x, lower, upper)
+            y_py = _softshrink_py(x, lower, upper)
+
+        self.assertAllCloseAccordingToType(y_native, y_py, atol=1e-4)
+
+        grad_native = t.gradient(y_native, x)
+        grad_py = t.gradient(y_py, x)
+
+        self.assertAllCloseAccordingToType(grad_native, grad_py, atol=1e-4)
+
 
 if __name__ == "__main__":
     tf.test.main()
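
For context, the _softshrink_py op added above implements the piecewise softshrink mapping: inputs below lower map to x - lower, inputs above upper map to x - upper, and everything in between is zeroed out. A minimal usage sketch follows (not part of the patch; the thresholds and input values are illustrative only):

    import tensorflow as tf

    from tensorflow_addons.activations.softshrink import _softshrink_py

    # Illustrative values, not from the patch: with lower=-0.5 and upper=0.5,
    # inputs inside [-0.5, 0.5] collapse to zero, while inputs outside that
    # band are shifted toward zero by the nearer threshold.
    x = tf.constant([-2.0, -0.25, 0.0, 0.25, 2.0])
    print(_softshrink_py(x, lower=-0.5, upper=0.5).numpy())
    # [-1.5  0.   0.   0.   1.5]

This is the equivalence that test_same_as_py_func exercises: over random inputs and random (lower, upper) pairs, the Python op and the native custom op are asserted to agree on both outputs and gradients.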