diff --git a/tests/python/driver/tvmc/test_autotuner.py b/tests/python/driver/tvmc/test_autotuner.py
index 66017823a669..7c05ff804fa4 100644
--- a/tests/python/driver/tvmc/test_autotuner.py
+++ b/tests/python/driver/tvmc/test_autotuner.py
@@ -14,6 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import platform
 import pytest
 import os
 
@@ -73,6 +74,10 @@ def test_get_tuning_tasks(onnx_mnist):
     assert all([type(x) is expected_task_type for x in sut]) is True
 
 
+@pytest.mark.skipif(
+    platform.machine() == "aarch64",
+    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
+)
 def test_tune_tasks__tuner__xgb(onnx_mnist, tmpdir_factory):
     pytest.importorskip("onnx")
 
@@ -141,6 +146,10 @@ def test_tune_tasks__tuner__xgb__no_early_stopping(onnx_mnist, tmpdir_factory):
     _tuner_test_helper(onnx_mnist, "xgb", tmpdir_name, early_stopping=None)
 
 
+@pytest.mark.skipif(
+    platform.machine() == "aarch64",
+    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
+)
 def test_tune_tasks__tuner__xgb__no_tuning_records(onnx_mnist, tmpdir_factory):
     pytest.importorskip("onnx")
 
diff --git a/tests/python/driver/tvmc/test_frontends.py b/tests/python/driver/tvmc/test_frontends.py
index 0cd02181ac40..98659b05ae5c 100644
--- a/tests/python/driver/tvmc/test_frontends.py
+++ b/tests/python/driver/tvmc/test_frontends.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import platform
 import pytest
 import builtins
 import importlib
@@ -74,6 +75,10 @@ def test_guess_frontend_onnx():
     assert type(sut) is tvmc.frontends.OnnxFrontend
 
 
+@pytest.mark.skipif(
+    platform.machine() == "aarch64",
+    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
+)
 def test_guess_frontend_pytorch():
     # some CI environments wont offer pytorch, so skip in case it is not present
     pytest.importorskip("torch")
@@ -245,6 +250,10 @@ def test_load_model__pth(pytorch_resnet18):
     assert "layer1.0.conv1.weight" in tvmc_model.params.keys()
 
 
+@pytest.mark.skipif(
+    platform.machine() == "aarch64",
+    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
+)
 def test_load_quantized_model__pth(pytorch_mobilenetv2_quantized):
     # some CI environments wont offer torch, so skip in case it is not present
     pytest.importorskip("torch")
diff --git a/tests/python/driver/tvmc/test_model.py b/tests/python/driver/tvmc/test_model.py
index 74c1c4ded8a4..fb1f718c1bed 100644
--- a/tests/python/driver/tvmc/test_model.py
+++ b/tests/python/driver/tvmc/test_model.py
@@ -55,6 +55,10 @@ def test_tvmc_workflow(use_vm, keras_simple):
     assert "output_0" in result.outputs.keys()
 
 
+@pytest.mark.skipif(
+    platform.machine() == "aarch64",
+    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
+)
 @pytest.mark.parametrize("use_vm", [True, False])
 def test_save_load_model(use_vm, keras_simple, tmpdir_factory):
     pytest.importorskip("onnx")
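
The patch repeats the same skipif decorator in every affected test. As a minimal sketch, not part of the patch, the pattern could be factored into a single module-level marker; the names skip_on_aarch64 and test_example below are hypothetical, and a shared conftest.py is assumed:

# A sketch of the pytest pattern used above: a module-level marker built from
# platform.machine(), applied to tests known to fail on AArch64.
import platform

import pytest

# The condition is evaluated once at import time; the reason string mirrors the patch.
skip_on_aarch64 = pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)


@skip_on_aarch64
def test_example():
    # Hypothetical test body; the real tests exercise tvmc tuning, frontends and models.
    assert True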