diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py
index 90f05c42c8a76..5e4e2a61ddd43 100644
--- a/ivy/functional/frontends/tensorflow/raw_ops.py
+++ b/ivy/functional/frontends/tensorflow/raw_ops.py
@@ -569,6 +569,9 @@ def TanhGrad(*, y, dy, name="TanhGrad"):
     return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))
 
 
+Tile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))
+
+
 @to_ivy_arrays_and_back
 def Transpose(*, x, perm, name="Transpose"):
     ret = ivy.permute_dims(x, axes=perm)
diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
index d8edff4ae5933..130afb1b46cc8 100644
--- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
+++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
@@ -1198,6 +1198,63 @@ def test_tensorflow_TanhGrad( # NOQA
     )
 
 
+# Tile
+@st.composite
+def _multiple_shape_helper(draw):
+    # Draw a random input array (any valid dtype) together with its shape.
+    input_dtype, input_array, input_shape = draw(
+        helpers.dtype_and_values(
+            available_dtypes=helpers.get_dtypes("valid"), ret_shape=True
+        )
+    )
+    input_dims = len(input_shape)
+
+    # Draw a 1-D integer `multiples` vector with one entry per input dimension.
+    dt_n_multiples = draw(
+        helpers.dtype_and_values(
+            available_dtypes=["int32", "int64"],
+            min_value=0,
+            max_value=10,
+            shape=draw(
+                helpers.get_shape(
+                    min_num_dims=1,
+                    max_num_dims=1,
+                    min_dim_size=input_dims,
+                    max_dim_size=input_dims,
+                )
+            ),
+        )
+    )
+    return input_dtype, input_array, dt_n_multiples
+
+
+@handle_frontend_test(
+    fn_tree="tensorflow.raw_ops.Tile",
+    all_arguments=_multiple_shape_helper(),
+)
+def test_tensorflow_Tile(
+    *,
+    all_arguments,
+    test_flags,
+    frontend,
+    fn_tree,
+    on_device,
+    backend_fw,
+):
+    input_dtype, input_matrix, dt_and_multiples = all_arguments
+    dt_mul, multiples = dt_and_multiples
+    helpers.test_frontend_function(
+        input_dtypes=input_dtype + dt_mul,
+        input=input_matrix[0],
+        multiples=multiples[0],
+        test_flags=test_flags,
+        backend_to_test=backend_fw,
+        frontend=frontend,
+        fn_tree=fn_tree,
+        on_device=on_device,
+    )
+
+
 @st.composite
 def _permute_dims_helper(draw):
     shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="shape"))
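
Usage sketch (not part of the diff): a minimal illustration of what the new Tile raw-op alias is expected to do, assuming the NumPy backend; the example values are made up, and the keyword-only call style mirrors the test above.

import ivy
import ivy.functional.frontends.tensorflow as tf_frontend

# Assumption: NumPy backend is installed; any ivy backend should behave the same.
ivy.set_backend("numpy")

x = ivy.array([[1, 2], [3, 4]])
multiples = ivy.array([2, 2])

# Tile aliases tf_frontend.general_functions.tile, so both calls should repeat
# x twice along each axis, producing a (4, 4) result like tf.raw_ops.Tile.
out_raw = tf_frontend.raw_ops.Tile(input=x, multiples=multiples)
out_fn = tf_frontend.general_functions.tile(x, multiples=multiples)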