Skip to content

Commit

Permalink
[PaddlePaddle Hackathon 4][Frontend][Paddle] Add grid_sample/gaussian_random/flip/fill_zeros_like/unique for Paddle frontend (#14277)
Browse files Browse the repository at this point in the history

Add grid-sample/gaussian_random/flip/fill_zeros_like/unique for paddle frontend.
  • Loading branch information
MayYouBeProsperous authored Mar 13, 2023
1 parent 6fa88e3 commit 06fabe4
Show file tree
Hide file tree
Showing 2 changed files with 208 additions and 0 deletions.
102 changes: 102 additions & 0 deletions python/tvm/relay/frontend/paddlepaddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -680,6 +680,17 @@ def convert_fill_constant_batch_size_like(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_fill_zeros_like(g, op, block):
    """Operator converter for fill_zeros_like."""

    data = g.get_node(op.input("X")[0])
    target_dtype = _convert_dtype_value(op.attr("dtype"))
    zero = _expr.const(0, dtype=target_dtype)
    # full_like keeps the input's dtype, so cast to the requested dtype.
    out = _op.transform.full_like(data, zero).astype(target_dtype)
    g.add_node(op.output("Out")[0], out)


def convert_flatten(g, op, block):
"""Operator converter for flatten."""

Expand Down Expand Up @@ -707,6 +718,21 @@ def convert_flatten(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_flip(g, op, block):
    """Operator converter for flip.

    Reverses the input along every axis listed in the ``axis`` attribute by
    chaining one ``reverse`` op per axis.
    """

    x = g.get_node(op.input("X")[0])
    axis = op.attr("axis")

    # Start the chain from the input itself: the original loop left `out`
    # undefined (NameError) when `axis` was an empty list; now an empty
    # axis list is simply a no-op.
    out = x
    for ax in axis:
        out = _op.reverse(out, ax)

    g.add_node(op.output("Out")[0], out)


def convert_gather(g, op, block):
"""Operator converter for gather."""

Expand All @@ -730,6 +756,17 @@ def convert_gather_nd(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_gaussian_random(g, op, block):
    """Operator converter for gaussian_random."""

    mean = op.attr("mean")
    std = op.attr("std")
    shape = op.attr("shape")
    # NOTE(review): the Paddle seed attribute is forwarded directly as the
    # PRNG key; Paddle treats seed == 0 as "nondeterministic", which this
    # pass-through does not reproduce — confirm whether that matters here.
    seed = op.attr("seed")
    out = _op.random.normal(key=seed, shape=shape, mean=mean, scale=std)
    g.add_node(op.output("Out")[0], out)


def convert_gelu(g, op, block):
"""Operator converter for gelu."""

Expand All @@ -741,6 +778,32 @@ def convert_gelu(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_grid_sampler(g, op, block):
    """Operator converter for grid_sampler.

    Paddle supplies the grid in channels-last form (NHW2 / NDHW3); Relay's
    ``grid_sample`` expects it channels-first, so the grid is transposed
    before the call.
    """

    x = g.get_node(op.input("X")[0])
    data_shape = infer_shape(x)
    grid = g.get_node(op.input("Grid")[0])
    mode = op.attr("mode")
    padding_mode = op.attr("padding_mode")
    align_corners = op.attr("align_corners")

    if len(data_shape) == 4:
        # 2-D sampling: NHW2 grid -> N2HW.
        layout = "NCHW"
        axes = [0, 3, 1, 2]
        grid = _op.transform.transpose(grid, axes)
    elif len(data_shape) == 5:
        # 3-D sampling: NDHW3 grid -> N3DHW.
        layout = "NCDHW"
        axes = [0, 4, 1, 2, 3]
        grid = _op.transform.transpose(grid, axes)
    else:
        # Was a placeholder-free f-string (ruff F541); report the actual rank.
        msg = f"grid_sampler only supports 4D and 5D inputs, got {len(data_shape)}D."
        raise ValueError(msg)

    out = _op.image.grid_sample(x, grid, mode, layout, padding_mode, align_corners)
    g.add_node(op.output("Output")[0], out)


def convert_group_norm(g, op, block):
"""Operator converter for group_norm."""

Expand Down Expand Up @@ -2255,6 +2318,40 @@ def convert_transpose(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_unique(g, op, block):
    """Operator converter for unique.

    In Paddle, the ``dtype`` attribute is the dtype of the index-like
    outputs (Indices/Index/Counts), not of the unique values themselves.
    The previous implementation cast the values output into a temporary
    that was then discarded (dead code); the cast now targets the index
    outputs where it belongs, and Out is registered unchanged.
    """

    x = g.get_node(op.input("X")[0])
    return_index = op.attr("return_index")
    return_inverse = op.attr("return_inverse")
    return_counts = op.attr("return_counts")
    axis = op.attr("axis")
    dtype = _convert_dtype_value(op.attr("dtype"))

    if len(axis) == 0:
        # No axis given: operate on the flattened tensor.
        x = _op.reshape(x, [-1])

    if return_counts:
        unique, indices, inverse_indices, _, counts = _op.unique(
            x, is_sorted=True, return_counts=True
        )
    else:
        unique, indices, inverse_indices, _ = _op.unique(x, is_sorted=True, return_counts=False)
        counts = None

    g.add_node(op.output("Out")[0], unique)

    def _to_requested_dtype(tensor):
        # Cast an index-like output to the requested dtype when needed.
        if dtype != infer_type(tensor).checked_type.dtype:
            tensor = _op.cast(tensor, dtype)
        return tensor

    if return_index:
        g.add_node(op.output("Indices")[0], _to_requested_dtype(indices))
    if return_inverse:
        g.add_node(op.output("Index")[0], _to_requested_dtype(inverse_indices))
    if return_counts:
        g.add_node(op.output("Counts")[0], _to_requested_dtype(counts))


def convert_unsqueeze(g, op, block):
"""Operator converter for unsqueeze."""

Expand Down Expand Up @@ -2346,14 +2443,18 @@ def convert_where_index(g, op, block):
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
"fill_constant_batch_size_like": convert_fill_constant_batch_size_like,
"fill_zeros_like": convert_fill_zeros_like,
"flatten_contiguous_range": convert_flatten,
"floor": convert_unary_op,
"floor_mod": convert_elementwise_op,
"flip": convert_flip,
"gather": convert_gather,
"gather_nd": convert_gather_nd,
"gaussian_random": convert_gaussian_random,
"gelu": convert_gelu,
"greater_equal": convert_elementwise_op,
"greater_than": convert_elementwise_op,
"grid_sampler": convert_grid_sampler,
"group_norm": convert_group_norm,
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
Expand Down Expand Up @@ -2443,6 +2544,7 @@ def convert_where_index(g, op, block):
"tile": convert_tile,
"top_k_v2": convert_topk,
"transpose2": convert_transpose,
"unique": convert_unique,
"unsqueeze2": convert_unsqueeze,
"unstack": convert_unstack,
"where": convert_where,
Expand Down
106 changes: 106 additions & 0 deletions tests/python/frontend/paddlepaddle/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -679,6 +679,23 @@ def forward(self, x, y):
verify_model(ExpandAs(), [x_data, y_data])


@tvm.testing.uses_gpu
def test_forward_fill_zeros_like():
    # Class renamed from the typo "FilZeroLike" (internal name only).
    class FillZerosLike(nn.Layer):
        def __init__(self, dtype=None):
            super(FillZerosLike, self).__init__()
            self.dtype = dtype

        @paddle.jit.to_static
        def forward(self, x):
            return paddle.zeros_like(x, dtype=self.dtype)

    input_shape = [2, 3, 5]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(FillZerosLike("float32"), input_data=input_data)
    verify_model(FillZerosLike("int32"), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_flatten():
class Flatten(nn.Layer):
Expand All @@ -697,6 +714,23 @@ def forward(self, x):
verify_model(Flatten(2, -2), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_flip():
    class Flip(nn.Layer):
        def __init__(self, axis):
            super(Flip, self).__init__()
            self.axis = axis

        @paddle.jit.to_static
        def forward(self, x):
            return paddle.flip(x, axis=self.axis)

    input_data = paddle.rand([2, 3, 4], dtype="float32")
    # Cover a leading axis, a negative axis, and a multi-axis flip.
    for flip_axis in (0, -1, [0, 1]):
        verify_model(Flip(flip_axis), input_data)


@tvm.testing.uses_gpu
def test_forward_gather():
class Gather(nn.Layer):
Expand Down Expand Up @@ -750,6 +784,39 @@ def forward(self, inputs):
verify_model(GroupNorm(num_channels, 2), input_data, rtol=1e-4, atol=1e-4)


@tvm.testing.uses_gpu
def test_forward_grid_sampler():
    class GridSampler(nn.Layer):
        def __init__(self, mode="bilinear", padding_mode="zeros", align_corners=True):
            super(GridSampler, self).__init__()
            self.mode = mode
            self.padding_mode = padding_mode
            self.align_corners = align_corners

        def forward(self, x, grid):
            return paddle.nn.functional.grid_sample(
                x,
                grid,
                mode=self.mode,
                padding_mode=self.padding_mode,
                align_corners=self.align_corners,
            )

    # Spatial case: 4-D data (NCHW) with an NHW2 grid.
    data_4d = paddle.rand(shape=[4, 4, 8, 8], dtype="float32")
    grid_4d = paddle.rand(shape=[4, 8, 8, 2], dtype="float32")
    # Volumetric case: 5-D data (NCDHW) with an NDHW3 grid.
    data_5d = paddle.rand(shape=[4, 4, 4, 4, 4], dtype="float32")
    grid_5d = paddle.rand(shape=[4, 8, 8, 8, 3], dtype="float32")

    for data, grid in ((data_4d, grid_4d), (data_5d, grid_5d)):
        verify_model(GridSampler(mode="nearest"), input_data=[data, grid])
        verify_model(GridSampler(padding_mode="reflection"), input_data=[data, grid])
        verify_model(GridSampler(padding_mode="border"), input_data=[data, grid])
        verify_model(GridSampler(align_corners=False), input_data=[data, grid])


@tvm.testing.uses_gpu
def test_forward_scatter():
class Scatter(nn.Layer):
Expand Down Expand Up @@ -1394,6 +1461,45 @@ def slice5(inputs):
# verify_model(slice5, input_data=paddle.randn((4,)))


@tvm.testing.uses_gpu
def test_forward_unique():
    class Unique(nn.Layer):
        def __init__(
            self,
            return_index=False,
            return_inverse=False,
            return_counts=False,
            axis=None,
            dtype="int64",
        ):
            super(Unique, self).__init__()
            self.return_index = return_index
            self.return_inverse = return_inverse
            self.return_counts = return_counts
            # Bug fix: the constructor previously discarded the `axis`
            # argument (`self.axis = None`).
            self.axis = axis
            self.dtype = dtype

        @paddle.jit.to_static
        def forward(self, inputs):
            # Bug fix: `return_index` was stored but never forwarded to
            # paddle.unique, so the Indices output was never produced and
            # the converter's Indices path went untested.
            result = paddle.unique(
                inputs,
                return_index=self.return_index,
                return_inverse=self.return_inverse,
                return_counts=self.return_counts,
                axis=self.axis,
                dtype=self.dtype,
            )
            return result

    input_shape = [2, 3, 5]
    input_data = paddle.rand(input_shape)
    verify_model(Unique(), input_data=input_data)
    verify_model(Unique(return_index=True), input_data=input_data)
    verify_model(Unique(return_index=True, return_inverse=True), input_data=input_data)
    verify_model(
        Unique(return_index=True, return_inverse=True, return_counts=True), input_data=input_data
    )


@tvm.testing.uses_gpu
def run_math_api(func):
api_name = func.__name__.split("_")[-1]
Expand Down

0 comments on commit 06fabe4

Please sign in to comment.