fix adaptive_avg_pool exporting to onnx (#857)
* fix adaptive_avg_pool exporting to onnx

* remove debug codes

* fix ci

* resolve comment
RunningLeon authored Aug 12, 2022
1 parent 5fb342e commit 670a504
Showing 13 changed files with 104 additions and 221 deletions.
1 change: 0 additions & 1 deletion mmdeploy/codebase/mmpose/models/__init__.py
@@ -1,5 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 
-from .backbones import *  # noqa: F401,F403
 from .detectors import *  # noqa: F401,F403
 from .heads import *  # noqa: F401,F403
5 changes: 0 additions & 5 deletions mmdeploy/codebase/mmpose/models/backbones/__init__.py

This file was deleted.

29 changes: 0 additions & 29 deletions mmdeploy/codebase/mmpose/models/backbones/litehrnet.py

This file was deleted.

3 changes: 1 addition & 2 deletions mmdeploy/codebase/mmseg/models/decode_heads/__init__.py
@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .aspp_head import aspp_head__forward
 from .ema_head import ema_module__forward
-from .psp_head import ppm__forward
 
-__all__ = ['aspp_head__forward', 'ppm__forward', 'ema_module__forward']
+__all__ = ['aspp_head__forward', 'ema_module__forward']
52 changes: 0 additions & 52 deletions mmdeploy/codebase/mmseg/models/decode_heads/psp_head.py

This file was deleted.

5 changes: 4 additions & 1 deletion mmdeploy/pytorch/functions/__init__.py
@@ -1,4 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+from .adaptive_pool import (adaptive_avg_pool2d__default,
+                            adaptive_avg_pool2d__ncnn)
 from .atan2 import atan2__default
 from .chunk import chunk__ncnn, chunk__torchscript
 from .expand import expand__ncnn
@@ -20,5 +22,6 @@
     'tensor__size__ncnn', 'topk__dynamic', 'topk__tensorrt', 'chunk__ncnn',
     'triu__default', 'atan2__default', 'normalize__ncnn', 'expand__ncnn',
     'chunk__torchscript', 'masked_fill__onnxruntime',
-    'tensor__setitem__default'
+    'tensor__setitem__default', 'adaptive_avg_pool2d__default',
+    'adaptive_avg_pool2d__ncnn'
 ]
44 changes: 44 additions & 0 deletions mmdeploy/pytorch/functions/adaptive_pool.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+import torch.nn.functional as F
+from torch.nn.modules.utils import _pair
+
+from mmdeploy.core import FUNCTION_REWRITER
+from mmdeploy.utils import Backend, get_root_logger, is_dynamic_shape
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='torch.nn.functional.adaptive_avg_pool2d')
+def adaptive_avg_pool2d__default(ctx, input, output_size):
+    """Rewrite `adaptive_avg_pool2d` for default backend."""
+    output_size = _pair(output_size)
+    if int(output_size[0]) == int(output_size[1]) == 1:
+        out = ctx.origin_func(input, output_size)
+    else:
+        deploy_cfg = ctx.cfg
+        is_dynamic_flag = is_dynamic_shape(deploy_cfg)
+        if is_dynamic_flag:
+            logger = get_root_logger()
+            logger.warning('`adaptive_avg_pool2d` would be '
+                           'replaced to `avg_pool2d` explicitly')
+        size = input.shape[2:]
+        k = [int(size[i] / output_size[i]) for i in range(0, len(size))]
+        out = F.avg_pool2d(
+            input,
+            kernel_size=k,
+            stride=k,
+            padding=0,
+            ceil_mode=False,
+            count_include_pad=False)
+    return out
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='torch.nn.functional.adaptive_avg_pool2d',
+    backend=Backend.NCNN.value)
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='torch.nn.functional.adaptive_avg_pool2d',
+    backend=Backend.TORCHSCRIPT.value)
+def adaptive_avg_pool2d__ncnn(ctx, input, output_size):
+    """Rewrite `adaptive_avg_pool2d` for ncnn and torchscript backend."""
+    return ctx.origin_func(input, output_size)
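
For context (not part of the diff): the default rewrite above is only exact when the input's spatial size divides evenly by the requested output size, because it swaps adaptive pooling for a fixed `avg_pool2d` whose kernel and stride are roughly `input_size // output_size`. A minimal sketch of that equivalence, with the 6x6 input and 3x3 output size being illustrative assumptions:

import torch
import torch.nn.functional as F

# 6x6 pooled to 3x3: every output cell averages a fixed 2x2 window, so
# adaptive_avg_pool2d and avg_pool2d with kernel = stride = 2 agree exactly.
x = torch.rand(1, 3, 6, 6)
out_size = (3, 3)
k = [x.shape[2] // out_size[0], x.shape[3] // out_size[1]]  # -> [2, 2]
ref = F.adaptive_avg_pool2d(x, out_size)
rewritten = F.avg_pool2d(
    x, kernel_size=k, stride=k, padding=0, ceil_mode=False,
    count_include_pad=False)
assert torch.allclose(ref, rewritten)
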
15 changes: 5 additions & 10 deletions mmdeploy/pytorch/ops/__init__.py
@@ -1,8 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from .adaptive_avg_pool import (adaptive_avg_pool1d__default,
-                                adaptive_avg_pool2d__default,
-                                adaptive_avg_pool2d__ncnn,
-                                adaptive_avg_pool3d__default)
+from .adaptive_pool import adaptive_avg_pool2d__ncnn
 from .gelu import gelu__ncnn
 from .grid_sampler import grid_sampler__default
 from .hardsigmoid import hardsigmoid__default
@@ -15,10 +12,8 @@
 from .squeeze import squeeze__default
 
 __all__ = [
-    'adaptive_avg_pool1d__default', 'adaptive_avg_pool2d__default',
-    'adaptive_avg_pool3d__default', 'grid_sampler__default',
-    'hardsigmoid__default', 'instance_norm__tensorrt', 'generic_rnn__ncnn',
-    'squeeze__default', 'adaptive_avg_pool2d__ncnn', 'gelu__ncnn',
-    'layer_norm__ncnn', 'linear__ncnn', '_prepare_onnx_paddings__tensorrt',
-    'roll_default'
+    'grid_sampler__default', 'hardsigmoid__default', 'instance_norm__tensorrt',
+    'generic_rnn__ncnn', 'squeeze__default', 'adaptive_avg_pool2d__ncnn',
+    'gelu__ncnn', 'layer_norm__ncnn', 'linear__ncnn',
+    '_prepare_onnx_paddings__tensorrt', 'roll_default'
 ]
90 changes: 0 additions & 90 deletions mmdeploy/pytorch/ops/adaptive_avg_pool.py

This file was deleted.

13 changes: 13 additions & 0 deletions mmdeploy/pytorch/ops/adaptive_pool.py
@@ -0,0 +1,13 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from mmdeploy.core import SYMBOLIC_REWRITER
+
+
+@SYMBOLIC_REWRITER.register_symbolic(
+    'adaptive_avg_pool2d', is_pytorch=True, backend='ncnn')
+def adaptive_avg_pool2d__ncnn(ctx, g, x, output_size):
+    """Register ncnn symbolic function for `adaptive_avg_pool2d`.
+
+    Align symbolic of adaptive_avg_pool2d in ncnn.
+    """
+    return g.op('mmdeploy::AdaptiveAvgPool2d', x, output_size)
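
For context (not part of the diff): with the ncnn backend the function rewriter above keeps the original `adaptive_avg_pool2d` call, and this symbolic then maps it to a single `mmdeploy::AdaptiveAvgPool2d` node in the exported graph. A rough sketch of how that could be exercised, reusing the `RewriterContext(cfg, backend, opset=...)` pattern from the tests in this PR; the `Pool` module, the deploy config values, and the `tmp.onnx` path are illustrative assumptions, and export details may differ in a real ncnn pipeline:

import torch
import torch.nn.functional as F
from mmcv import Config
from mmdeploy.core import RewriterContext


class Pool(torch.nn.Module):

    def forward(self, x):
        # kept as adaptive_avg_pool2d by the ncnn function rewriter, then
        # emitted as mmdeploy::AdaptiveAvgPool2d by the symbolic above
        return F.adaptive_avg_pool2d(x, (2, 2))


cfg = Config(
    dict(onnx_config=dict(type='onnx'), backend_config=dict(type='ncnn')))
with RewriterContext(cfg, 'ncnn', opset=11):
    torch.onnx.export(
        Pool().eval(), torch.rand(1, 3, 8, 8), 'tmp.onnx', opset_version=11)
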
5 changes: 4 additions & 1 deletion tests/test_apis/test_onnx_passes.py
@@ -13,6 +13,9 @@
 
 onnx_file = tempfile.NamedTemporaryFile(suffix='.onnx').name
 
+ort_cfg = dict(
+    backend_config=dict(type='onnxruntime'), onnx_config=dict(type='onnx'))
+
 
 def _find_next_node(start: int, nodes: List, op_type: str) -> Tuple[Any, int]:
     for idx, n in enumerate(nodes[start:]):
@@ -166,7 +169,7 @@ def forward(self, x):
     model = TestModel()
     x = torch.rand(1, 4, 8, 8)
 
-    with RewriterContext({}, onnx_custom_passes=_optimize_onnx):
+    with RewriterContext(ort_cfg, onnx_custom_passes=_optimize_onnx):
         torch.onnx.export(
             model,
             x,
18 changes: 18 additions & 0 deletions tests/test_pytorch/test_pytorch_functions.py
@@ -341,3 +341,21 @@ def setitem_slice(x, y):
     nodes = onnx_model.graph.node
     for node in nodes:
         assert node.op_type != 'ScatterND'
+
+
+@pytest.mark.parametrize('output_size', [1, 3])
+def test_adaptive_avg_pool2d(output_size):
+    input = torch.rand(1, 3, 6, 6)
+    model = WrapFunction(F.adaptive_avg_pool2d, output_size=output_size)
+    pytorch_output = model(input)
+    deploy_cfg_ort = mmcv.Config(
+        dict(
+            onnx_config=dict(input_shape=None),
+            backend_config=dict(type='onnxruntime'),
+            codebase_config=dict(type='mmdet', task='ObjectDetection')))
+    rewrite_output, _ = get_rewrite_outputs(
+        model,
+        model_inputs={'input': input},
+        deploy_cfg=deploy_cfg_ort,
+        run_with_backend=True)
+    assert torch.allclose(pytorch_output, rewrite_output[0])
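
As a side note (not part of the test), the two parametrized cases exercise different branches of the default rewrite: `output_size=1` stays on the `ctx.origin_func` path and should export as `GlobalAveragePool`, while `output_size=3` takes the `avg_pool2d` branch and should export as a plain `AveragePool`. A standalone sketch of that graph-level check, with the `Pool` module, config values, and `tmp.onnx` path being illustrative assumptions:

import onnx
import torch
import torch.nn.functional as F
from mmcv import Config
from mmdeploy.core import RewriterContext


class Pool(torch.nn.Module):

    def __init__(self, output_size):
        super().__init__()
        self.output_size = output_size

    def forward(self, x):
        return F.adaptive_avg_pool2d(x, self.output_size)


cfg = Config(
    dict(onnx_config=dict(type='onnx', input_shape=None),
         backend_config=dict(type='onnxruntime')))
for size, expected in [(1, 'GlobalAveragePool'), (3, 'AveragePool')]:
    with RewriterContext(cfg, 'onnxruntime', opset=11):
        torch.onnx.export(
            Pool(size).eval(), torch.rand(1, 3, 6, 6), 'tmp.onnx',
            opset_version=11)
    op_types = [node.op_type for node in onnx.load('tmp.onnx').graph.node]
    assert expected in op_types
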
45 changes: 15 additions & 30 deletions tests/test_pytorch/test_pytorch_ops.py
@@ -14,9 +14,20 @@
 @pytest.fixture(autouse=False, scope='function')
 def prepare_symbolics():
     context = RewriterContext(
-        Config({'backend_config': {
-            'type': 'tensorrt'
-        }}), 'tensorrt', opset=11)
+        Config(
+            dict(
+                onnx_config=dict(
+                    type='onnx',
+                    export_params=True,
+                    keep_initializers_as_inputs=False,
+                    opset_version=11,
+                    save_file='end2end.onnx',
+                    input_names=['input'],
+                    output_names=['output'],
+                    input_shape=None),
+                backend_config=dict(type='tensorrt'))),
+        'tensorrt',
+        opset=11)
     context.enter()
 
     yield
@@ -58,18 +69,6 @@ def get_model_onnx_nodes(model, x, onnx_file=onnx_file):
 @pytest.mark.usefixtures('prepare_symbolics')
 class TestAdaptivePool:
 
-    def test_adaptive_pool_1d_global(self):
-        x = torch.rand(2, 2, 2)
-        model = OpModel(torch.nn.functional.adaptive_avg_pool1d, [1]).eval()
-        nodes = get_model_onnx_nodes(model, x)
-        assert nodes[0].op_type == 'GlobalAveragePool'
-
-    def test_adaptive_pool_1d(self):
-        x = torch.rand(2, 2, 2)
-        model = OpModel(torch.nn.functional.adaptive_avg_pool1d, [2]).eval()
-        nodes = get_model_onnx_nodes(model, x)
-        assert nodes[0].op_type == 'AveragePool'
-
     def test_adaptive_pool_2d_global(self):
         x = torch.rand(2, 2, 2)
         model = OpModel(torch.nn.functional.adaptive_avg_pool2d, [1, 1]).eval()
@@ -80,21 +79,7 @@ def test_adaptive_pool_2d(self):
         x = torch.rand(2, 2, 2)
         model = OpModel(torch.nn.functional.adaptive_avg_pool2d, [2, 2]).eval()
         nodes = get_model_onnx_nodes(model, x)
-        assert nodes[0].op_type == 'AveragePool'
-
-    def test_adaptive_pool_3d_global(self):
-        x = torch.rand(2, 2, 2, 2)
-        model = OpModel(torch.nn.functional.adaptive_avg_pool3d,
-                        [1, 1, 1]).eval()
-        nodes = get_model_onnx_nodes(model, x)
-        assert nodes[0].op_type == 'GlobalAveragePool'
-
-    def test_adaptive_pool_3d(self):
-        x = torch.rand(2, 2, 2, 2)
-        model = OpModel(torch.nn.functional.adaptive_avg_pool3d,
-                        [2, 2, 2]).eval()
-        nodes = get_model_onnx_nodes(model, x)
-        assert nodes[0].op_type == 'AveragePool'
+        assert nodes[-1].op_type == 'AveragePool'
 
 
 @pytest.mark.usefixtures('prepare_symbolics_ncnn')
