
Commit

Merge pull request apache#40 from wjj19950828/paddle_frontend
Finished Paddle common api
jiangjiajun authored Sep 17, 2021
2 parents e8c8086 + 2355add commit c5b5273
Showing 2 changed files with 330 additions and 29 deletions.
86 changes: 77 additions & 9 deletions python/tvm/relay/frontend/paddlepaddle.py
@@ -291,11 +291,14 @@ def convert_arg_min(g, op, block):
 def convert_argsort(g, op, block):
     """Operator converter for argsort."""
 
-    x = g.get_node(op.inputs("X")[0])
+    x = g.get_node(op.input("X")[0])
     axis = op.attr("axis")
     descending = op.attr("descending")
-    out = _op.argsort(x, axis, not descending, dtype="int64")
-    g.add_node(op.output("Indices")[0], out)
+
+    out = _op.sort(x, axis, not descending)
+    out_indice = _op.argsort(x, axis, not descending, dtype="int64")
+    g.add_node(op.output("Out")[0], out)
+    g.add_node(op.output("Indices")[0], out_indice)
 
 
 def convert_assign(g, op, block):
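
The converter previously populated only the "Indices" output. Paddle's argsort op exposes both the sorted values ("Out") and the sorting indices ("Indices"), which the new code wires up via _op.sort and _op.argsort. A NumPy sketch of the intended semantics (illustration only, not part of the commit):

import numpy as np

x = np.array([[3.0, 1.0, 2.0]])
axis, descending = -1, False

out = np.sort(x, axis=axis)         # "Out"     -> [[1., 2., 3.]]
indices = np.argsort(x, axis=axis)  # "Indices" -> [[1, 2, 0]]
if descending:
    out = np.flip(out, axis=axis)
    indices = np.flip(indices, axis=axis)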
@@ -1296,6 +1299,18 @@ def convert_mul(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_mv(g, op, block):
+    """Operator converter for mv."""
+
+    x = g.get_node(op.input("X")[0])
+    y = g.get_node(op.input("Vec")[0])
+    y = _op.expand_dims(y, axis=-1)
+    y = _op.transpose(y)
+    out = _op.nn.dense(x, y)
+    out = _op.squeeze(out, axis=[-1])
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_numel(g, op, block):
     """Operator converter for numel."""
 
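convert_mv builds a matrix-vector product out of relay primitives: the vector is expanded to a column, transposed into a [1, n] "weight", fed through nn.dense (which computes x · Wᵀ), and the trailing unit axis is squeezed away. A NumPy check of that identity (illustration only; the names are local to this sketch):

import numpy as np

x = np.random.rand(4, 3).astype("float32")
vec = np.random.rand(3).astype("float32")

w = vec[:, None].T               # expand_dims(axis=-1) + transpose -> [1, 3]
out = (x @ w.T).squeeze(-1)      # nn.dense(x, w) + squeeze(axis=[-1])

assert np.allclose(out, x @ vec) # paddle.mv semantics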
@@ -1639,9 +1654,9 @@ def make_init_param_inputs(g, node, layer):
     if is_bidirec:
         num_directions = 2
 
-    X_shape = infer_shape(input_x)
-    time_steps = X_shape[0]
-    X_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)
+    x_shape = infer_shape(input_x)
+    time_steps = x_shape[0]
+    x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)
     for layer in range(num_layers):
         input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(
             g, op, layer, hidden_size, num_layers
@@ -1661,7 +1676,7 @@ def make_init_param_inputs(g, node, layer):
             WB = g.get_node(input_bias[i])
             RB = g.get_node(hidden_bias[i])
             output, H, C = generate_lstm(
-                X_steps=X_steps,
+                X_steps=x_steps,
                 H_t=H_t,
                 C_t=C_t,
                 W=W,
@@ -1682,8 +1697,7 @@ def make_init_param_inputs(g, node, layer):
 
         output = _op.transpose(output, axes=[0, 2, 1, 3])
         output = _op.reshape(output, newshape=(0, 0, -1))
-        X_steps = output
-        X_steps = _op.split(X_steps, indices_or_sections=time_steps, axis=0)
+        x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)
 
     g.add_node(op.output("Out")[0], output)
 
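Besides the snake_case rename, the two statements are folded into one: the concatenated layer output is re-split into per-timestep slices so the next layer consumes it step by step. A shape sketch, assuming a time-major [seq_len, batch, feature] layout (illustrative values only):

import numpy as np

seq_len, batch, feat = 5, 2, 8
input_x = np.zeros((seq_len, batch, feat), dtype="float32")

# analogue of _op.split(input_x, indices_or_sections=time_steps, axis=0)
x_steps = np.split(input_x, seq_len, axis=0)
assert len(x_steps) == seq_len and x_steps[0].shape == (1, batch, feat)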
@@ -2054,6 +2068,53 @@ def convert_unsqueeze(g, op, block):
     g.add_node(op.output("Out")[0], x)
 
 
+def convert_unstack(g, op, block):
+    """Operator converter for unstack."""
+
+    x = g.get_node(op.input("X")[0])
+    axis = op.attr("axis")
+    num = op.attr("num")
+    out = _op.split(x, num, axis=axis)
+    for i, out_i in enumerate(out):
+        out_i = _op.squeeze(out_i, axis=[axis])
+        g.add_node(op.output("Y")[i], out_i)
+
+
+def convert_unique(g, op, block):
+    """Operator converter for unique."""
+
+    x = g.get_node(op.input("X")[0])
+    ndim = len(infer_shape(x))
+    assert ndim == 1, "Only support 1D Tensor for PaddlePaddle's unique"
+    is_sorted = op.attr("is_sorted")
+    return_counts = op.attr("return_counts")
+    return_index = op.attr("return_index")
+    return_inverse = op.attr("return_inverse")
+    if return_counts:
+        [unique, indices, inverse_indices, num_uniq, counts] = _op.unique(
+            x, is_sorted=is_sorted, return_counts=True
+        )
+        unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
+        counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
+        indices_sliced = _op.strided_slice(indices, begin=[0], end=num_uniq, slice_mode="size")
+        counts_sliced = _op.cast(counts_sliced, "int64")
+        g.add_node(op.output("Counts")[0], counts_sliced)
+    else:
+        [unique, indices, inverse_indices, num_uniq] = _op.unique(
+            x, is_sorted=is_sorted, return_counts=False
+        )
+        unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
+        indices_sliced = _op.strided_slice(indices, begin=[0], end=num_uniq, slice_mode="size")
+
+    inverse_indices = _op.cast(inverse_indices, "int64")
+    indices_sliced = _op.cast(indices_sliced, "int64")
+    g.add_node(op.output("Out")[0], unique_sliced)
+    if return_index:
+        g.add_node(op.output("Indices")[0], indices_sliced)
+    if return_inverse:
+        g.add_node(op.output("Index")[0], inverse_indices)
+
+
 def convert_where(g, op, block):
     """Operator converter for where."""
 
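Relay's unique returns fixed-length tensors padded to the input size, so each result is trimmed to the first num_uniq valid entries with strided_slice before being bound to Paddle's optional outputs. The four outputs correspond to NumPy's unique as follows (1-D illustration only, not part of the commit):

import numpy as np

x = np.array([2, 2, 1, 3, 1])
unique, indices, inverse, counts = np.unique(
    x, return_index=True, return_inverse=True, return_counts=True
)
# unique  -> "Out":     [1, 2, 3]
# indices -> "Indices": first occurrence of each unique value (cast to int64)
# inverse -> "Index":   position of each element of x inside unique
# counts  -> "Counts":  occurrences per unique value (cast to int64)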
@@ -2152,6 +2213,7 @@ def convert_where(g, op, block):
     "logsumexp": convert_logsumexp,
     "matmul": convert_matmul,
     "matmul_v2": convert_matmul,
+    "mv": convert_mv,
     "mul": convert_mul,
     "nearest_interp_v2": convert_interpolate,
     "not_equal": convert_elementwise_op,
@@ -2176,19 +2238,23 @@ def convert_where(g, op, block):
     "relu6": convert_relu6,
     "reshape2": convert_reshape,
     "rnn": convert_rnn,
+    "round": convert_unary_op,
+    "rsqrt": convert_unary_op,
     "scale": convert_scale,
     "selu": convert_selu,
     "shape": convert_shape,
     "sigmoid": convert_unary_op,
+    "sign": convert_unary_op,
     "sin": convert_unary_op,
     "sinh": convert_unary_op,
     "size": convert_numel,
     "slice": convert_slice,
     "softmax": convert_softmax,
     "softplus": convert_softplus,
     "softshrink": convert_softshrink,
     "softsign": convert_softsign,
     "split": convert_split,
+    "sqrt": convert_unary_op,
     "square": convert_square,
     "squeeze2": convert_squeeze,
     "stack": convert_stack,
@@ -2203,6 +2269,8 @@ def convert_where(g, op, block):
     "tile": convert_tile,
     "transpose2": convert_transpose,
     "unsqueeze2": convert_unsqueeze,
+    "unstack": convert_unstack,
+    "unique": convert_unique,
     "where": convert_where,
     "where_index": convert_nonzero,
 }
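
For context, the _convert_map table is consumed by the frontend's graph walker, which looks up each Paddle op by type and invokes the matching converter. A simplified, hypothetical driver loop (the real one lives elsewhere in paddlepaddle.py):

def ops_to_relay(g, program):
    for block in program.blocks:
        for op in block.ops:
            if op.type in ("feed", "fetch"):
                continue  # graph inputs/outputs are handled separately
            _convert_map[op.type](g, op, block)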
