Skip to content

Commit

Permalink
[CI] Update to PyTorch v1.10 in GPU image (#9866)
Browse files Browse the repository at this point in the history
* apply PT vs LLVM symbol conflict mitigation

* update ci-gpu to v0.79 with PT 1.10.1

* disable quantized mv3 test due to weird segfault from torch
  • Loading branch information
masahi authored Jan 8, 2022
1 parent f6f252f commit 38f0239
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 23 deletions.
2 changes: 1 addition & 1 deletion Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils

// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
ci_lint = "tlcpack/ci-lint:v0.67"
ci_gpu = "tlcpack/ci-gpu:v0.78"
ci_gpu = "tlcpack/ci-gpu:v0.79"
ci_cpu = "tlcpack/ci-cpu:v0.79"
ci_wasm = "tlcpack/ci-wasm:v0.71"
ci_i386 = "tlcpack/ci-i386:v0.74"
Expand Down
34 changes: 14 additions & 20 deletions tests/python/frontend/pytorch/qnn_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -378,26 +378,20 @@ def get_imagenet_input():
from torchvision.models.quantization import mobilenet as qmobilenet
from torchvision.models.quantization import inception as qinception
from torchvision.models.quantization import googlenet as qgooglenet

qmodels = []

for per_channel in [False, True]:
qmodels += [
("resnet18", qresnet.resnet18(pretrained=True), per_channel),
("mobilenet_v2", qmobilenet.mobilenet_v2(pretrained=True), per_channel),
# disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug
# See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756
# ("inception_v3", qinception.inception_v3(pretrained=True), per_channel),
# tracing quantized googlenet broken as of v1.6
# ("googlenet", qgooglenet(pretrained=True), per_channel),
]

if is_version_greater_than("1.7.1"):
from torchvision.models.quantization import mobilenet_v3_large as qmobilenet_v3_large

qmodels.append(
("mobilenet_v3_large", qmobilenet_v3_large(pretrained=True, quantize=True).eval(), True)
)
from torchvision.models.quantization import mobilenet_v3_large as qmobilenet_v3_large

per_channel = True
qmodels = [
("resnet18", qresnet.resnet18(pretrained=True), per_channel),
("mobilenet_v2", qmobilenet.mobilenet_v2(pretrained=True), per_channel),
("inception_v3", qinception.inception_v3(pretrained=True), per_channel),
# tracing quantized googlenet broken as of v1.6
# ("googlenet", qgooglenet(pretrained=True), per_channel),
# As of v1.10, quantized mobilenet v3 has a weird segfault issue
# during make_conv_packed_param
# See https://ci.tlcpack.ai/blue/organizations/jenkins/tvm/detail/ci-docker-staging/192
# ("mobilenet_v3_large", qmobilenet_v3_large(pretrained=True, quantize=True).eval(), True)
]

results = []

Expand Down
3 changes: 2 additions & 1 deletion tests/scripts/task_config_build_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ echo set\(USE_VULKAN ON\) >> config.cmake
echo set\(USE_OPENGL ON\) >> config.cmake
echo set\(USE_MICRO ON\) >> config.cmake
echo set\(USE_MICRO_STANDALONE_RUNTIME ON\) >> config.cmake
echo set\(USE_LLVM llvm-config-9\) >> config.cmake
echo set\(USE_LLVM \"/usr/bin/llvm-config-9 --link-static\"\) >> config.cmake
echo set\(USE_NNPACK ON\) >> config.cmake
echo set\(NNPACK_PATH /NNPACK/build/\) >> config.cmake
echo set\(USE_RPC ON\) >> config.cmake
Expand All @@ -47,3 +47,4 @@ echo set\(USE_TENSORRT_CODEGEN ON\) >> config.cmake
echo set\(USE_LIBBACKTRACE AUTO\) >> config.cmake
echo set\(USE_CCACHE OFF\) >> config.cmake
echo set\(SUMMARIZE ON\) >> config.cmake
echo set\(HIDE_PRIVATE_SYMBOLS ON\) >> config.cmake
2 changes: 1 addition & 1 deletion tests/scripts/task_sphinx_precheck.sh
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ cd docs
make clean
TVM_TUTORIAL_EXEC_PATTERN=none make html 2>&1 | tee /tmp/$$.log.txt

grep -v -E "__mro__|UserWarning|FutureWarning|tensorflow|Keras|pytorch|TensorFlow|403|git describe|scikit-learn version" < /tmp/$$.log.txt > /tmp/$$.logclean.txt || true
grep -v -E "__mro__|UserWarning|FutureWarning|tensorflow|Keras|pytorch|TensorFlow|coremltools|403|git describe|scikit-learn version" < /tmp/$$.log.txt > /tmp/$$.logclean.txt || true
echo "---------Sphinx Log----------"
cat /tmp/$$.logclean.txt
echo "-----------------------------"
Expand Down

0 comments on commit 38f0239

Please sign in to comment.