From f9328e4e14240f47271ef9e9954f8f2719f3a4ce Mon Sep 17 00:00:00 2001
From: Tristan Konolige
Date: Wed, 5 May 2021 19:58:48 -0700
Subject: [PATCH] [FIX] Fix autoscheduler tuning on sparse matrices where there
 are multiple with the same shape (#7974)

* [FIX] Fix autoscheduler tuning on sparse matrices where there are multiple with the same shape

* formatting

* remove unreachable code
---
 python/tvm/auto_scheduler/measure.py        |  2 +-
 python/tvm/auto_scheduler/search_task.py    | 11 ++++-------
 python/tvm/relay/analysis/sparse_dense.py   | 20 +++++++++++++++-----
 python/tvm/topi/nn/sparse.py                | 10 +++++++++-
 src/relay/transforms/memory_alloc.cc        |  1 -
 tutorials/auto_scheduler/tune_sparse_x86.py | 10 +++++++++-
 6 files changed, 38 insertions(+), 16 deletions(-)

diff --git a/python/tvm/auto_scheduler/measure.py b/python/tvm/auto_scheduler/measure.py
index 55a91148bc94b..ea4a129727c35 100644
--- a/python/tvm/auto_scheduler/measure.py
+++ b/python/tvm/auto_scheduler/measure.py
@@ -886,7 +886,7 @@ def _timed_eval_func(
                     random_fill(empty_array)
                     args.append(empty_array)
             if task_inputs_count != len(task_input_names):
-                logger.warning(
+                raise RuntimeError(
                     "task_inputs not fully matched, check if there's any unexpected error"
                 )
             dev.sync()
diff --git a/python/tvm/auto_scheduler/search_task.py b/python/tvm/auto_scheduler/search_task.py
index fca889448180f..4bc3968e7fe89 100644
--- a/python/tvm/auto_scheduler/search_task.py
+++ b/python/tvm/auto_scheduler/search_task.py
@@ -309,14 +309,11 @@ def register_task_input_buffer(
             tensor_from_file = _try_load_buffer_from_file(input_name)
             if tensor_from_file:
                 input_table[input_name] = tensor_from_file
-
-        if input_name in input_table.keys():
-            logger.warning(
-                "Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s",
-                input_name,
-                "set overwrite to True or this Tensor will not be registered",
+        elif input_name in input_table.keys():
+            raise RuntimeError(
+                "Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s"
+                % (input_name, "set overwrite to True or this Tensor will not be registered")
             )
-            return input_table[input_name]

     input_table[input_name] = input_data
     if save_to_file:
diff --git a/python/tvm/relay/analysis/sparse_dense.py b/python/tvm/relay/analysis/sparse_dense.py
index 23929f45917d1..67e40a150e612 100644
--- a/python/tvm/relay/analysis/sparse_dense.py
+++ b/python/tvm/relay/analysis/sparse_dense.py
@@ -99,21 +99,31 @@ def process_params(expr, params, block_size, sparsity_threshold):
             params[name + ".indices"] = tvm.nd.array(sparse_weight.indices)
             params[name + ".indptr"] = tvm.nd.array(sparse_weight.indptr)

-            prefix = "sparse_dense_bsr_%d_%d_%d_%d_%.2f_" % (
+            prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
                 w_np.shape[0],
                 w_np.shape[1],
                 block_size[0],
                 block_size[1],
-                1 - sparsity,
+                sparse_weight.indices.shape[0],
+                sparse_weight.indptr.shape[0],
             )
             register_task_input_buffer(
-                "default", prefix + "W_data", tvm.runtime.ndarray.array(sparse_weight.data)
+                "default",
+                prefix + "W_data",
+                tvm.runtime.ndarray.array(sparse_weight.data),
+                overwrite=True,
             )
             register_task_input_buffer(
-                "default", prefix + "W_indices", tvm.runtime.ndarray.array(sparse_weight.indices)
+                "default",
+                prefix + "W_indices",
+                tvm.runtime.ndarray.array(sparse_weight.indices),
+                overwrite=True,
             )
             register_task_input_buffer(
-                "default", prefix + "W_indptr", tvm.runtime.ndarray.array(sparse_weight.indptr)
+                "default",
+                prefix + "W_indptr",
+                tvm.runtime.ndarray.array(sparse_weight.indptr),
+                overwrite=True,
             )
     ret = SparseAnalysisResult(
         weight_name=tvm.runtime.convert(memo.weight_name),
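The core of the fix is visible in the hunk above: the task-input buffer name no longer encodes the weight's density rounded to two decimals, but the lengths of the BSR `indices` and `indptr` arrays, and re-registration explicitly passes `overwrite=True`. The snippet below is a minimal sketch of why that matters, assuming scipy is available; the 8x8 weights and the (2, 2) block size are made-up values for illustration. Two weights with the same shape and the same element density used to collide under one buffer name, while the new naming keeps them apart.

```python
import numpy as np
import scipy.sparse as sp

# Two 8x8 weights with the same shape and the same element density (4/64),
# but a different block structure under a (2, 2) BSR layout.
w1 = np.zeros((8, 8), dtype="float32")
w1[0:2, 0:2] = 1.0  # 4 nonzeros packed into a single 2x2 block
w2 = np.zeros((8, 8), dtype="float32")
w2[0, 0] = w2[2, 2] = w2[4, 4] = w2[6, 6] = 1.0  # 4 nonzeros spread over 4 blocks


def old_key(w):
    # Pre-patch naming: shape, block size, density rounded to two decimals.
    density = np.count_nonzero(w) / w.size
    return "sparse_dense_bsr_%d_%d_%d_%d_%.2f_" % (w.shape[0], w.shape[1], 2, 2, density)


def new_key(w):
    # Post-patch naming: density is replaced by the indices/indptr lengths.
    bsr = sp.bsr_matrix(w, blocksize=(2, 2))
    return "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
        w.shape[0], w.shape[1], 2, 2, bsr.indices.shape[0], bsr.indptr.shape[0]
    )


print(old_key(w1) == old_key(w2))  # True  -> both weights map to one registered buffer
print(new_key(w1) == new_key(w2))  # False -> each weight gets its own buffer entry
```

Names can still coincide for weights with an identical sparsity structure, which is why `process_params` now also registers with `overwrite=True`, and a second registration under an existing name without that flag raises a `RuntimeError` instead of warning and keeping the previously registered tensor.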
diff --git a/python/tvm/topi/nn/sparse.py b/python/tvm/topi/nn/sparse.py
index f5737d087fc73..60d7dde91a0c1 100644
--- a/python/tvm/topi/nn/sparse.py
+++ b/python/tvm/topi/nn/sparse.py
@@ -426,7 +426,15 @@ def _process_inputs(input_tensors, m, n, prefix_init):
             density *= i
         density /= k * n
         density = density.value
-        sparse_prefix = "%s_%d_%d_%d_%d_%.2f_" % (prefix_init, n, k, bs_r, bs_c, density)
+        sparse_prefix = "%s_%d_%d_%d_%d_%d_%d_" % (
+            prefix_init,
+            n,
+            k,
+            bs_r,
+            bs_c,
+            sparse_indices.shape[0],
+            sparse_indptr.shape[0],
+        )

         visited = set()

diff --git a/src/relay/transforms/memory_alloc.cc b/src/relay/transforms/memory_alloc.cc
index 2b69b02ab9993..03473b7d7455f 100644
--- a/src/relay/transforms/memory_alloc.cc
+++ b/src/relay/transforms/memory_alloc.cc
@@ -64,7 +64,6 @@ inline Expr AllocTensor(const Expr& storage, tvm::relay::Expr shape, DataType dt
   return AllocTensor(storage, offset, shape, dtype, assert_shape);
 }

-
 // Check if the primitive function contains only reshape ops.
 bool IsReshapeOnly(const Expr& expr) {
   if (auto* func = expr.as<FunctionNode>()) {
diff --git a/tutorials/auto_scheduler/tune_sparse_x86.py b/tutorials/auto_scheduler/tune_sparse_x86.py
index a635a740e1047..d8e6404e31f6a 100644
--- a/tutorials/auto_scheduler/tune_sparse_x86.py
+++ b/tutorials/auto_scheduler/tune_sparse_x86.py
@@ -39,6 +39,7 @@
 import numpy as np

 import tvm
+import tvm.testing
 from tvm import te, auto_scheduler, runtime, topi
 from tvm.auto_scheduler import _ffi_api
 from tvm.topi.utils import get_const_tuple
@@ -108,7 +109,14 @@ def sparse_dense(M, N, K, w_data_shape, w_indices_shape, w_indptr_shape, dtype):
 target = tvm.target.Target("llvm")

 # Register the sparse data to task inputs
-prefix = "sparse_dense_bsr_%d_%d_%d_%d_%.2f_" % (N, K, BS_R, BS_C, density)
+prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
+    N,
+    K,
+    BS_R,
+    BS_C,
+    W_sp_np.indices.shape[0],
+    W_sp_np.indptr.shape[0],
+)
 task = tvm.auto_scheduler.SearchTask(
     func=sparse_dense,
     args=(M, N, K, W_sp_np.data.shape, W_sp_np.indices.shape, W_sp_np.indptr.shape, "float32"),
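On the tutorial side, the same naming has to be reproduced when the real sparse buffers are handed to the search task, so that the keys built from `prefix` match the names `_process_inputs` in `topi/nn/sparse.py` derives from the kernel's placeholders during tuning. Below is a hedged sketch of that pairing, not the tutorial's verbatim code: the random BSR weight comes from scipy rather than the tutorial's own helper, the sizes are the tutorial's example values, and `sparse_dense` stands in for the workload function registered earlier in the tutorial.

```python
import scipy.sparse as sp
import tvm
from tvm import auto_scheduler, runtime

# Tutorial-style workload sizes (illustrative values).
M, N, K, BS_R, BS_C, density = 128, 512, 256, 16, 1, 0.6
W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32").tobsr(
    blocksize=(BS_R, BS_C)
)

# Same naming scheme as topi/nn/sparse.py now uses, so the registered buffers
# are found again when the tuned kernel is measured.
prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
    N, K, BS_R, BS_C, W_sp_np.indices.shape[0], W_sp_np.indptr.shape[0]
)

task = auto_scheduler.SearchTask(
    func=sparse_dense,  # the workload registered earlier in the tutorial
    args=(M, N, K, W_sp_np.data.shape, W_sp_np.indices.shape, W_sp_np.indptr.shape, "float32"),
    target=tvm.target.Target("llvm"),
    task_inputs={
        prefix + "W_data": runtime.ndarray.array(W_sp_np.data),
        prefix + "W_indices": runtime.ndarray.array(W_sp_np.indices),
        prefix + "W_indptr": runtime.ndarray.array(W_sp_np.indptr),
    },
    task_inputs_save_to_file=True,
)
```

With the measure-side change above, a mismatch between these `task_inputs` keys and the names expected by the lowered kernel now fails loudly with a RuntimeError during measurement instead of being logged and tuned against random data.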