[Docker] Update to Torch 1.10.1 (#9781)
* update pytorch to 1.10.1

* fix missing import

* test only on llvm and cuda

* Revert "[Docker][Onnx] Upgrade ONNX to latest version (#9519)"

This reverts commit 3f5dca5.

* skip testing if target is not enabled
masahi committed Dec 23, 2021
1 parent b35fc83 commit b29a443
Showing 4 changed files with 16 additions and 70 deletions.
11 changes: 5 additions & 6 deletions docker/install/ubuntu_install_onnx.sh
@@ -27,14 +27,13 @@ set -o pipefail
 # https://github.com/onnx/onnx/pull/2834). When updating the CI image
 # to onnx>=1.9, onnxoptimizer should also be installed.
 pip3 install \
-    onnx==1.10.2 \
-    onnxruntime==1.9.0 \
-    onnxoptimizer==0.2.6
+    onnx==1.8.1 \
+    onnxruntime==1.7.0

 # torch depends on a number of other packages, but unhelpfully, does
 # not expose that in the wheel!!!
 pip3 install future

 pip3 install \
-    torch==1.7.0 \
-    torchvision==0.8.1
+    torch==1.10.1 \
+    torchvision==0.11.2
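
The script comment above explains the version coupling: starting with onnx 1.9 the optimizer lives in the separate onnxoptimizer wheel rather than inside the onnx package. A minimal, hedged sketch of a version-tolerant import (the helper name and the pass choice are illustrative, not taken from this commit):

try:
    import onnxoptimizer as optimizer  # onnx >= 1.9: standalone wheel
except ImportError:
    from onnx import optimizer  # onnx <= 1.8: bundled onnx.optimizer module

def strip_identity_nodes(model):
    # Run one widely available pass over an onnx.ModelProto and return the result.
    return optimizer.optimize(model, ["eliminate_identity"])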
1 change: 1 addition & 0 deletions python/tvm/relay/frontend/pytorch.py
@@ -23,6 +23,7 @@
 import itertools
 import math
 import sys
+import logging

 import numpy as np
 import tvm
60 changes: 0 additions & 60 deletions tests/python/frontend/onnx/test_forward.py
@@ -4967,38 +4967,11 @@ def verify_eyelike(indata):
)

unsupported_onnx_tests = [
"test_basic_convinteger",
"test_batchnorm_epsilon_training_mode",
"test_batchnorm_example_training_mode",
"test_bernoulli",
"test_bernoulli_expanded",
"test_bernoulli_double",
"test_bernoulli_double_expanded",
"test_bernoulli_seed",
"test_bernoulli_seed_expanded",
"test_cast_BFLOAT16_to_FLOAT",
"test_cast_DOUBLE_to_FLOAT16",
"test_cast_FLOAT_to_BFLOAT16",
"test_cast_FLOAT_to_STRING",
"test_cast_STRING_to_FLOAT",
"test_castlike_BFLOAT16_to_FLOAT",
"test_castlike_BFLOAT16_to_FLOAT_expanded",
"test_castlike_DOUBLE_to_FLOAT",
"test_castlike_DOUBLE_to_FLOAT16",
"test_castlike_DOUBLE_to_FLOAT16_expanded",
"test_castlike_FLOAT16_to_DOUBLE",
"test_castlike_FLOAT16_to_FLOAT",
"test_castlike_FLOAT_to_BFLOAT16",
"test_castlike_FLOAT_to_BFLOAT16_expanded",
"test_castlike_FLOAT_to_DOUBLE",
"test_castlike_FLOAT_to_FLOAT16",
"test_castlike_FLOAT_to_STRING",
"test_castlike_FLOAT_to_STRING_expanded",
"test_castlike_STRING_to_FLOAT",
"test_castlike_STRING_to_FLOAT_expanded",
"test_convinteger_with_padding",
"test_convinteger_without_padding",
"test_convtranspose_autopad_same",
"test_convtranspose_dilations",
"test_convtranspose_output_shape",
"test_cumsum_1d",
@@ -5014,13 +4987,9 @@ def verify_eyelike(indata):
"test_dropout_default_mask",
"test_dropout_default_mask_ratio",
"test_dropout_default_ratio",
"test_gru_batchwise",
"test_hardswish",
"test_identity_sequence",
"test_if_seq",
"test_loop11",
"test_loop13_seq",
"test_lstm_batchwise",
"test_matmulinteger",
"test_maxpool_2d_same_lower",
"test_maxpool_2d_same_upper",
@@ -5030,10 +4999,6 @@ def verify_eyelike(indata):
"test_mvn",
# This test fails llvm with a lowering error:
"test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded",
"test_optional_has_element",
"test_optional_get_element",
"test_optional_get_element_sequence",
"test_optional_has_element_empty",
"test_qlinearmatmul_3D",
"test_range_float_type_positive_delta_expanded",
"test_range_int32_type_negative_delta_expanded",
@@ -5051,13 +5016,6 @@ def verify_eyelike(indata):
"test_round",
"test_sequence_insert_at_back",
"test_sequence_insert_at_front",
"test_shape_end_1",
"test_shape_end_negative_1",
"test_shape_start_1",
"test_shape_start_1_end_2",
"test_shape_start_1_end_negative_1",
"test_shape_start_negative_1",
"test_simple_rnn_batchwise",
"test_simple_rnn_defaults",
"test_simple_rnn_with_initial_bias",
"test_split_variable_parts_1d",
@@ -5083,24 +5041,6 @@ def verify_eyelike(indata):
"test_training_dropout_mask",
"test_training_dropout_zero_ratio",
"test_training_dropout_zero_ratio_mask",
"test_tril",
"test_tril_pos",
"test_tril_square",
"test_tril_square_neg",
"test_tril_neg",
"test_tril_one_row_neg",
"test_tril_out_neg",
"test_tril_out_pos",
"test_tril_zero",
"test_triu",
"test_triu_one_row",
"test_triu_out_neg_out",
"test_triu_out_pos",
"test_triu_neg",
"test_triu_pos",
"test_triu_square",
"test_triu_square_neg",
"test_triu_zero",
# These unsqueeze tests work, but take 2+ hrs to run
"test_unsqueeze_three_axes",
"test_unsqueeze_two_axes",
14 changes: 10 additions & 4 deletions tests/python/frontend/pytorch/test_forward.py
@@ -212,7 +212,10 @@ def verify_model(
     compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))

     with tvm.transform.PassContext(opt_level=3):
-        for target, dev in tvm.testing.enabled_targets():
+        for target in ["llvm", "cuda"]:
+            if not tvm.runtime.enabled(target):
+                continue
+            dev = tvm.device(target, 0)
             relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
             relay_model = graph_executor.create(relay_graph, relay_lib, dev)
             relay_model.set_input(**relay_params)
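
The hunk above replaces tvm.testing.enabled_targets() with an explicit llvm/cuda list guarded by tvm.runtime.enabled(), so the test is skipped rather than failing on CI nodes whose TVM build lacks a backend. A short sketch of that guard pattern in isolation (the helper name is illustrative, not from the commit):

import tvm

def available_targets(candidates=("llvm", "cuda")):
    # Return (target, device) pairs for backends compiled into this TVM build.
    pairs = []
    for target in candidates:
        if not tvm.runtime.enabled(target):
            continue  # backend not enabled at build time; skip instead of erroring
        pairs.append((target, tvm.device(target, 0)))
    return pairs

A test can then loop over available_targets() and exercise only the backends actually present on the machine.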
@@ -2196,7 +2199,7 @@ def test_3d_models():


 def _get_default_vm_targets():
-    return [tgt for (tgt, _) in tvm.testing.enabled_targets()]
+    return ["llvm", "cuda"]


 def verify_script_model(pt_model, ishapes, targets, idtype=None):
@@ -2269,7 +2272,10 @@ def verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=["llv
     mod, params = relay.frontend.from_pytorch(input_model, input_shapes)

     for tgt in targets:
+        if not tvm.runtime.enabled(tgt):
+            continue
         print("Running on target", tgt)
+
         dev = tvm.device(tgt, 0)

         evaluator = relay.create_executor("vm", mod=mod, device=dev, target=tgt).evaluate()
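
The VM path gets the same guard before the executor is created. A hedged sketch of the full loop, assuming mod and params come from relay.frontend.from_pytorch and inputs is a list of NumPy arrays (the function name is illustrative, not from the commit):

import tvm
from tvm import relay

def run_vm_on_enabled_targets(mod, params, inputs, targets=("llvm", "cuda")):
    # Evaluate a Relay module with the VM executor on every enabled target.
    results = []
    for tgt in targets:
        if not tvm.runtime.enabled(tgt):
            continue  # backend missing from this build; skip it
        dev = tvm.device(tgt, 0)
        evaluator = relay.create_executor("vm", mod=mod, device=dev, target=tgt).evaluate()
        # Relay binds keyword arguments by parameter name, so constant params
        # can be passed alongside the positional inputs.
        results.append(evaluator(*inputs, **params))
    return results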
@@ -3897,15 +3903,15 @@ def test_fn(x, mask):
     for shape in [(10,), (3, 4), (16, 32, 64)]:
         x = torch.randn(*shape)
         mask = x.ge(0.5)
-        verify_trace_model(test_fn, [x, mask], ["llvm", "cuda", "nvptx"])
+        verify_trace_model(test_fn, [x, mask], ["llvm", "cuda"])


 def test_unique():
     def test_fn(is_sorted, return_inverse, return_counts):
         return lambda x: torch.unique(x, is_sorted, return_inverse, return_counts)

     in_data = torch.randint(0, 20, (10,), dtype=torch.int32)
-    targets = ["llvm", "cuda", "nvptx"]
+    targets = ["llvm", "cuda"]
     verify_trace_model(test_fn(True, True, True), [in_data], targets)
     verify_trace_model(test_fn(True, False, True), [in_data], targets)
     verify_trace_model(test_fn(True, True, False), [in_data], targets)
