From 7506fd6154b2d9fa6eea690e87a40e91ad382ae1 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 8 Feb 2020 14:40:02 -0800
Subject: [PATCH] [LINT][PY] Fixes for pylint==2.4.4

---
 Makefile | 5 +-
 python/tvm/_ffi/base.py | 16 ++--
 python/tvm/autotvm/database.py | 1 +
 python/tvm/autotvm/feature.py | 3 +-
 .../autotvm/graph_tuner/base_graph_tuner.py | 1 -
 .../graph_tuner/utils/traverse_graph.py | 1 +
 python/tvm/autotvm/measure/measure.py | 1 +
 python/tvm/autotvm/measure/measure_methods.py | 6 +-
 python/tvm/autotvm/task/dispatcher.py | 2 +
 python/tvm/autotvm/task/relay_integration.py | 5 +-
 python/tvm/autotvm/task/topi_integration.py | 2 +
 python/tvm/autotvm/tophub.py | 1 +
 python/tvm/autotvm/tuner/callback.py | 1 +
 .../tvm/autotvm/tuner/sa_model_optimizer.py | 2 +-
 .../tvm/autotvm/tuner/xgboost_cost_model.py | 1 +
 python/tvm/build_module.py | 2 +-
 python/tvm/contrib/cc.py | 3 +-
 python/tvm/contrib/dlpack.py | 1 +
 python/tvm/contrib/download.py | 14 +---
 python/tvm/contrib/mxnet.py | 2 +-
 python/tvm/contrib/util.py | 33 --------
 python/tvm/hybrid/__init__.py | 3 +-
 python/tvm/hybrid/calls.py | 1 +
 python/tvm/hybrid/parser.py | 2 +-
 python/tvm/hybrid/util.py | 1 +
 python/tvm/relay/_parser.py | 6 +-
 python/tvm/relay/analysis.py | 2 +-
 python/tvm/relay/backend/_backend.py | 3 +-
 python/tvm/relay/backend/compile_engine.py | 2 +-
 python/tvm/relay/build_module.py | 5 +-
 python/tvm/relay/debug.py | 2 +-
 python/tvm/relay/expr.py | 6 +-
 python/tvm/relay/frontend/caffe2.py | 1 +
 python/tvm/relay/frontend/common.py | 5 +-
 python/tvm/relay/frontend/coreml.py | 83 +++++++++----------
 python/tvm/relay/frontend/keras.py | 5 +-
 python/tvm/relay/frontend/nnvm_common.py | 4 +-
 python/tvm/relay/frontend/onnx.py | 1 +
 python/tvm/relay/frontend/tensorflow.py | 1 +
 .../tvm/relay/frontend/tensorflow_parser.py | 4 +-
 python/tvm/relay/frontend/tflite.py | 6 +-
 python/tvm/relay/op/__init__.py | 1 +
 python/tvm/relay/op/_transform.py | 21 +++--
 python/tvm/relay/op/nn/_nn.py | 5 +-
 python/tvm/relay/op/transform.py | 2 +
 python/tvm/relay/parser.py | 1 +
 python/tvm/relay/qnn/op/op.py | 2 +-
 python/tvm/relay/quantize/_partition.py | 10 +--
 python/tvm/relay/quantize/quantize.py | 2 +-
 python/tvm/relay/scope_builder.py | 3 +-
 python/tvm/relay/testing/darknet.py | 2 +-
 python/tvm/relay/testing/resnet.py | 35 ++++----
 python/tvm/relay/testing/tf.py | 4 +-
 python/tvm/relay/testing/yolo_detection.py | 3 +-
 python/tvm/rpc/proxy.py | 5 +-
 python/tvm/rpc/server.py | 16 ++--
 python/tvm/rpc/tornado_util.py | 4 +-
 python/tvm/rpc/tracker.py | 3 +-
 python/tvm/runtime/module.py | 2 +-
 python/tvm/runtime/ndarray.py | 2 +-
 python/tvm/tensor.py | 1 -
 python/tvm/tensor_intrin.py | 2 +-
 topi/python/topi/arm_cpu/conv2d.py | 5 +-
 topi/python/topi/bifrost/conv2d.py | 3 +-
 topi/python/topi/cuda/conv2d_winograd.py | 2 +-
 topi/python/topi/cuda/nms.py | 5 +-
 topi/python/topi/cuda/rcnn/proposal.py | 4 +-
 topi/python/topi/cuda/softmax.py | 6 +-
 topi/python/topi/cuda/sort.py | 1 +
 topi/python/topi/cuda/vision.py | 2 +-
 topi/python/topi/hls/nn.py | 2 +-
 topi/python/topi/intel_graphics/conv2d.py | 22 ++---
 topi/python/topi/nn/bitserial_util.py | 3 +-
 topi/python/topi/nn/conv2d.py | 5 +-
 topi/python/topi/nn/fifo_buffer.py | 2 +-
 topi/python/topi/opengl/softmax.py | 2 +-
 topi/python/topi/testing/one_hot.py | 2 +-
 topi/python/topi/transform.py | 9 +-
 topi/python/topi/util.py | 3 +-
 topi/python/topi/vision/rcnn/proposal.py | 4 +-
 topi/python/topi/x86/conv2d.py | 4 +-
 topi/python/topi/x86/conv2d_alter_op.py | 2 +-
 topi/python/topi/x86/conv2d_int8.py | 4 +-
 topi/python/topi/x86/nn.py | 2 +-
 topi/python/topi/x86/util.py | 2 +-
 vta/python/vta/bitstream.py | 9 +-
 vta/python/vta/environment.py | 4 +-
 vta/python/vta/exec/rpc_server.py | 1 +
 vta/python/vta/ir_pass.py | 4 +-
 vta/python/vta/program_bitstream.py | 1 +
 vta/python/vta/top/graphpack.py | 11 ++-
 vta/python/vta/top/op.py | 14 ++--
 vta/scripts/tune_conv2d.py | 1 -
 vta/scripts/tune_conv2d_transpose.py | 1 -
 vta/scripts/tune_dense.py | 1 -
 vta/scripts/tune_group_conv2d.py | 1 -
 96 files changed, 238 insertions(+), 278 deletions(-)

diff --git a/Makefile b/Makefile
index ad24baaa14851..c5aa74a25188c 100644
--- a/Makefile
+++ b/Makefile
@@ -94,10 +94,7 @@ javadoc:
 
 # Cython build
 cython:
-	cd python; python setup.py build_ext --inplace
-
-cython2:
-	cd python; python2 setup.py build_ext --inplace
+	cd python; python3 setup.py build_ext --inplace
 
 cython3:
 	cd python; python3 setup.py build_ext --inplace

diff --git a/python/tvm/_ffi/base.py b/python/tvm/_ffi/base.py
index ddc942ada40d0..8d3ce19f9444d 100644
--- a/python/tvm/_ffi/base.py
+++ b/python/tvm/_ffi/base.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # coding: utf-8
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name, import-outside-toplevel
 """Base library for TVM FFI."""
 import sys
 import os
@@ -204,14 +204,14 @@ def _find_error_type(line):
         if _valid_error_name(err_name):
             return err_name
         return None
-    else:
-        end_pos = line.find(":")
-        if end_pos == -1:
-            return None
-        err_name = line[:end_pos]
-        if _valid_error_name(err_name):
-            return err_name
+
+    end_pos = line.find(":")
+    if end_pos == -1:
         return None
+    err_name = line[:end_pos]
+    if _valid_error_name(err_name):
+        return err_name
+    return None
 
 
 def c2pyerror(err_msg):

diff --git a/python/tvm/autotvm/database.py b/python/tvm/autotvm/database.py
index 07f3766acb1df..55d4180f03be4 100644
--- a/python/tvm/autotvm/database.py
+++ b/python/tvm/autotvm/database.py
@@ -104,6 +104,7 @@ class RedisDatabase(Database):
     MAGIC_SPLIT = "$"
 
     def __init__(self, db_index=REDIS_PROD):
+        # pylint: disable=import-outside-toplevel
         import redis
 
         if db_index == RedisDatabase.REDIS_TEST:

diff --git a/python/tvm/autotvm/feature.py b/python/tvm/autotvm/feature.py
index 3976ced54888b..56c52a52f218e 100644
--- a/python/tvm/autotvm/feature.py
+++ b/python/tvm/autotvm/feature.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name
+# pylint: disable=invalid-name,
 """Extract feature of iter vars
 
 There are two types of feature
@@ -148,6 +148,7 @@ def get_flatten_name(fea):
     }
 
     if isinstance(fea, str):
+        # pylint: disable=import-outside-toplevel
         from .record import decode
         # flatten line to feature
         line = fea

diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
index 1cb76d4f120d0..bdff057c5a7ee 100644
--- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
+++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
@@ -539,4 +539,3 @@ def write_opt_sch2record_file(self, record_file="graph_opt_schedule.log"):
     @abstractmethod
     def run(self, **kwargs):
         """Run graph tuning."""
-        pass

diff --git a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
index f58dd28baa29d..d3a27cbc1ecd6 100644
--- a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
+++ b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
@@ -65,6 +65,7 @@ def expr2graph(expr, target_ops, node_dict, node_list):
                              % op_name)
         topi_funcs += OP2COMPUTE[op_name]
     env.reset(topi_funcs)
+    # pylint: disable=not-context-manager
     with env:
         _expr2graph_impl(expr, target_ops, node_dict, node_list)
     task_pos = 0

diff --git a/python/tvm/autotvm/measure/measure.py b/python/tvm/autotvm/measure/measure.py
index 0836fb741bd22..d77e7373b580d 100644
--- a/python/tvm/autotvm/measure/measure.py
+++ b/python/tvm/autotvm/measure/measure.py
@@ -208,6 +208,7 @@ def measure_option(builder, runner):
         Using `min_repeat_ms` can dynamically adjusts `number`, so it is recommended.
         The typical value for NVIDIA GPU is 150 ms.
     """
+    # pylint: disable=import-outside-toplevel
     from .measure_methods import LocalBuilder, LocalRunner
 
     if isinstance(builder, str):

diff --git a/python/tvm/autotvm/measure/measure_methods.py b/python/tvm/autotvm/measure/measure_methods.py
index 68f2955b05aad..44e6de934649c 100644
--- a/python/tvm/autotvm/measure/measure_methods.py
+++ b/python/tvm/autotvm/measure/measure_methods.py
@@ -324,11 +324,11 @@ def __init__(self,
         self.server = None
 
     def set_task(self, task):
-        self.task = task
-
+        # pylint: disable=import-outside-toplevel
         from ...rpc.tracker import Tracker
         from ...rpc.server import Server
 
+        self.task = task
         tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
         device_key = '$local$device$%d' % tracker.port
         server = Server('0.0.0.0', port=9000, port_end=10000,
@@ -362,6 +362,7 @@ def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_opti
         # if target is vta, we need to use vta build
         if hasattr(measure_input.target, 'device_name') and \
             measure_input.target.device_name == 'vta':
+            # pylint: disable=import-outside-toplevel
             import vta
             func = vta.build(s, args, target_host=task.target_host)
         else:
@@ -460,6 +461,7 @@ def run_through_rpc(measure_input, build_result,
         # Program the FPGA every single time when targeting VTA
         if hasattr(measure_input.target, 'device_name') and \
             measure_input.target.device_name == 'vta':
+            # pylint: disable=import-outside-toplevel
             from vta import program_fpga, reconfig_runtime
             program_fpga(remote, None)
             reconfig_runtime(remote)

diff --git a/python/tvm/autotvm/task/dispatcher.py b/python/tvm/autotvm/task/dispatcher.py
index f6ab10064b4a2..15ed2953b28a7 100644
--- a/python/tvm/autotvm/task/dispatcher.py
+++ b/python/tvm/autotvm/task/dispatcher.py
@@ -282,6 +282,7 @@ def load(self, records):
                 Each row of this file is an encoded record pair.
                 Otherwise, it is an iterator.
        """
+        # pylint: disable=import-outside-toplevel
        from pathlib import Path
        from ..record import load_from_file
 
@@ -454,6 +455,7 @@ def __init__(self, records):
                 Each row of this file is an encoded record pair.
                 Otherwise, it is an iterator.
        """
+        # pylint: disable=import-outside-toplevel
        from ..record import load_from_file
 
        super(ApplyGraphBest, self).__init__()

diff --git a/python/tvm/autotvm/task/relay_integration.py b/python/tvm/autotvm/task/relay_integration.py
index 3eb1f1ddf7c3f..7471ca3d6c8f7 100644
--- a/python/tvm/autotvm/task/relay_integration.py
+++ b/python/tvm/autotvm/task/relay_integration.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=unused-variable,invalid-name
+# pylint: disable=unused-variable,invalid-name, not-context-manager
 """
 Decorator and utilities for the integration with TOPI and Relay
 99.9% copy-paste of implementation by @MerryMercy
@@ -37,7 +37,7 @@ def _lower(mod,
            params):
     """ Helper to lower VTA properly.
     """
-
+    # pylint: disable=import-outside-toplevel
     from tvm import relay
     from tvm.relay.backend import graph_runtime_codegen
 
@@ -114,6 +114,7 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None,
     task: Array of autotvm.task.Task
         collected tasks
     """
+    # pylint: disable=import-outside-toplevel
     import tvm.relay.op
     from tvm import relay
     import topi

diff --git a/python/tvm/autotvm/task/topi_integration.py b/python/tvm/autotvm/task/topi_integration.py
index 8b3ba35e92abe..10a4f09b11f16 100644
--- a/python/tvm/autotvm/task/topi_integration.py
+++ b/python/tvm/autotvm/task/topi_integration.py
@@ -76,6 +76,7 @@ class TaskExtractEnv:
     registered = None
 
     def __init__(self, allow_duplicate=False):
+        # pylint: disable=import-outside-toplevel
         import topi
 
         # topi compute -> autotvm task name
@@ -168,6 +169,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
 
     def _register_topi_task(self):
         """register tuning wrapper for topi function"""
+        # pylint: disable=import-outside-toplevel
         import topi
 
         # Avoid double registration for certain targets

diff --git a/python/tvm/autotvm/tophub.py b/python/tvm/autotvm/tophub.py
index d953eaaeea9df..e1a7d86695f26 100644
--- a/python/tvm/autotvm/tophub.py
+++ b/python/tvm/autotvm/tophub.py
@@ -147,6 +147,7 @@ def check_backend(tophub_location, backend):
     if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, package_name)):
         return True
 
+    # pylint: disable=import-outside-toplevel
     if sys.version_info >= (3,):
         import urllib.request as urllib2
     else:
         import urllib2

diff --git a/python/tvm/autotvm/tuner/callback.py b/python/tvm/autotvm/tuner/callback.py
index 154406b9b2ed3..4c2fe87cf3c67 100644
--- a/python/tvm/autotvm/tuner/callback.py
+++ b/python/tvm/autotvm/tuner/callback.py
@@ -53,6 +53,7 @@ def _callback(_, inputs, results):
         for inp, result in zip(inputs, results):
             file_out.write(record.encode(inp, result, protocol) + "\n")
 
+    # pylint: disable=import-outside-toplevel
     from pathlib import Path
     if isinstance(file_out, Path):
         file_out = str(file_out)

diff --git a/python/tvm/autotvm/tuner/sa_model_optimizer.py b/python/tvm/autotvm/tuner/sa_model_optimizer.py
index babc612b86a99..5812033fd83df 100644
--- a/python/tvm/autotvm/tuner/sa_model_optimizer.py
+++ b/python/tvm/autotvm/tuner/sa_model_optimizer.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=consider-using-enumerate, invalid-name
+# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index
 """
 Cost model optimizer based on simulated annealing
 """

diff --git a/python/tvm/autotvm/tuner/xgboost_cost_model.py b/python/tvm/autotvm/tuner/xgboost_cost_model.py
index 34f4c03e224b6..882b0ad19dd50 100644
--- a/python/tvm/autotvm/tuner/xgboost_cost_model.py
+++ b/python/tvm/autotvm/tuner/xgboost_cost_model.py
@@ -420,6 +420,7 @@ def _extract_curve_feature_log(arg):
 def custom_callback(stopping_rounds, metric, fevals, evals=(), log_file=None,
                     maximize=False, verbose_eval=True):
     """callback function for xgboost to support multiple custom evaluation functions"""
+    # pylint: disable=import-outside-toplevel
     from xgboost.core import EarlyStopException
     from xgboost.callback import _fmt_metric
     from xgboost.training import aggcv

diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py
index 898d394a0b376..9346d7d5a627e 100644
--- a/python/tvm/build_module.py
+++ b/python/tvm/build_module.py
@@ -467,7 +467,7 @@ def _build_for_device(flist, target, target_host):
             func = ir_pass.InferFragment(func)
             warp_size = target.thread_warp_size
             func = ir_pass.LowerThreadAllreduce(func, warp_size)
-            fsplits = [s for s in ir_pass.SplitHostDevice(func)]
+            fsplits = list(ir_pass.SplitHostDevice(func))
             fhost.append(fsplits[0])
             for x in fsplits[1:]:
                 fdevice.append(x)

diff --git a/python/tvm/contrib/cc.py b/python/tvm/contrib/cc.py
index 1d368347686b8..ae37923a1dcfb 100644
--- a/python/tvm/contrib/cc.py
+++ b/python/tvm/contrib/cc.py
@@ -76,8 +76,7 @@ def get_target_triple():
                 msg += py_str(out)
                 return None
             return py_str(out)
-        else:
-            return None
+        return None
 
     return get_target_triple

diff --git a/python/tvm/contrib/dlpack.py b/python/tvm/contrib/dlpack.py
index dfffc3fa7e56b..7d006a19b2ada 100644
--- a/python/tvm/contrib/dlpack.py
+++ b/python/tvm/contrib/dlpack.py
@@ -54,6 +54,7 @@ def to_pytorch_func(tvm_func):
     wrapped_func: Function
         Wrapped tvm function that operates on PyTorch tensors
     """
+    # pylint: disable=import-outside-toplevel
     import torch
     import torch.utils.dlpack
     return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)

diff --git a/python/tvm/contrib/download.py b/python/tvm/contrib/download.py
index 1ff4f93b6c2a4..cdb8101cd8c04 100644
--- a/python/tvm/contrib/download.py
+++ b/python/tvm/contrib/download.py
@@ -15,9 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 """Helper utility for downloading"""
-from __future__ import print_function
-from __future__ import absolute_import as _abs
-
 import os
 import sys
 import time
@@ -48,10 +45,8 @@ def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=
     retries: int, optional
         Number of time to retry download, default at 3.
""" - if sys.version_info >= (3,): - import urllib.request as urllib2 - else: - import urllib2 + # pylint: disable=import-outside-toplevel + import urllib.request as urllib2 if os.path.isfile(path) and not overwrite: if size_compare: @@ -114,9 +109,8 @@ def _download_progress(count, block_size, total_size): if os.path.exists(tempfile): os.remove(tempfile) raise err - else: - print("download failed due to {}, retrying, {} attempt{} left" - .format(repr(err), retries, 's' if retries > 1 else '')) + print("download failed due to {}, retrying, {} attempt{} left" + .format(repr(err), retries, 's' if retries > 1 else '')) if "TEST_DATA_ROOT_PATH" in os.environ: diff --git a/python/tvm/contrib/mxnet.py b/python/tvm/contrib/mxnet.py index e3b234c9aa079..3f05b70c90140 100644 --- a/python/tvm/contrib/mxnet.py +++ b/python/tvm/contrib/mxnet.py @@ -49,7 +49,7 @@ def to_mxnet_func(func, const_loc=None): Run asynchrously in MXNet's async engine. """ # only import mxnet when wrap get called. - # pylint: disable=import-self + # pylint: disable=import-self, import-outside-toplevel import mxnet if isinstance(func, Module): func = func.entry_func diff --git a/python/tvm/contrib/util.py b/python/tvm/contrib/util.py index 2ab370bdcd862..2ebe175e81601 100644 --- a/python/tvm/contrib/util.py +++ b/python/tvm/contrib/util.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. """Common system utilities""" -from __future__ import absolute_import as _abs import os import tempfile import shutil @@ -167,35 +166,3 @@ def which(exec_name): if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path return None - -def get_lower_ir(s): - """Get lower ir code of a schedule. - This is useful for debug, since you don't have to find all inputs/outputs - for a schedule in a fused subgraph. - - Parameters - ---------- - s: Schedule - - Returns - ------- - ir: str - The lower ir - """ - from .. import tensor - from ..build_module import lower - - outputs = s.outputs - - inputs = [] - def find_all(op): - if isinstance(op, tensor.PlaceholderOp): - inputs.append(op.output(0)) - else: - for x in op.input_tensors: - find_all(x.op) - - for out in outputs: - find_all(out) - - return lower(s, inputs, simple_mode=True) diff --git a/python/tvm/hybrid/__init__.py b/python/tvm/hybrid/__init__.py index 55c33e5e317f4..988e5a67d7d26 100644 --- a/python/tvm/hybrid/__init__.py +++ b/python/tvm/hybrid/__init__.py @@ -50,7 +50,8 @@ def script(pyfunc): hybrid_func : function A decorated hybrid script function. """ - def wrapped_func(func, *args, **kwargs): #pylint: disable=missing-docstring + # pylint: disable=import-outside-toplevel, missing-docstring + def wrapped_func(func, *args, **kwargs): from .util import _is_tvm_arg_types if _is_tvm_arg_types(args): src = _pruned_source(func) diff --git a/python/tvm/hybrid/calls.py b/python/tvm/hybrid/calls.py index 7038f6144db34..e873e1974d213 100644 --- a/python/tvm/hybrid/calls.py +++ b/python/tvm/hybrid/calls.py @@ -69,6 +69,7 @@ def bind(func_id, args): def _math_intrin(func_id, args): + # pylint: disable=import-outside-toplevel from .. 
import intrin return getattr(intrin, func_id)(*args) diff --git a/python/tvm/hybrid/parser.py b/python/tvm/hybrid/parser.py index 57d6363288160..cd2433e64a8c6 100644 --- a/python/tvm/hybrid/parser.py +++ b/python/tvm/hybrid/parser.py @@ -198,7 +198,7 @@ def wrap_up_realize(self, node, body): ty, entry = self.symbols[key] #pylint: disable=invalid-name if ty in [Symbol.Input, Symbol.OutputBuffer]: continue - elif 'Buffer' in ty.name: + if 'Buffer' in ty.name: _buf = entry _scope = 'global' if ty is Symbol.BufferVar else ty.name[:-6].lower() to_pop.append(key) diff --git a/python/tvm/hybrid/util.py b/python/tvm/hybrid/util.py index a08a380dd7678..0883960fabfd6 100644 --- a/python/tvm/hybrid/util.py +++ b/python/tvm/hybrid/util.py @@ -70,6 +70,7 @@ def _pruned_source(func): def replace_io(body, rmap): """Replacing tensors usage according to the dict given""" + # pylint: disable=import-outside-toplevel from .. import ir_pass def replace(op): diff --git a/python/tvm/relay/_parser.py b/python/tvm/relay/_parser.py index 1f0088a768446..0fd1c105a3d1c 100644 --- a/python/tvm/relay/_parser.py +++ b/python/tvm/relay/_parser.py @@ -78,7 +78,7 @@ def __str__(self): class OpWrapper: """Overload the __call__ for op.""" - pass + class ExprOp(OpWrapper): """Call an expr. The default, but does not handle attrs well.""" @@ -273,7 +273,7 @@ def _check_existing_typ_expr(self, name, new_expr): def _type_expr_name(self, e): if isinstance(e, adt.Constructor): return "`{0}` ADT constructor".format(e.belong_to.name_hint) - elif isinstance(e, ty.GlobalTypeVar): + if isinstance(e, ty.GlobalTypeVar): if e.kind == ty.Kind.AdtHandle: return "ADT definition" return "function definition" @@ -623,7 +623,7 @@ def visitCallWithAttr(self, ctx: RelayParser.CallWithAttrContext): def call(self, func, args, attrs, type_args): if isinstance(func, OpWrapper): return func(args, attrs, type_args) - elif isinstance(func, adt.Constructor): + if isinstance(func, adt.Constructor): return func(*args) return expr.Call(func, args, attrs, type_args) diff --git a/python/tvm/relay/analysis.py b/python/tvm/relay/analysis.py index 7372fcdadd17f..a1206fb457a78 100644 --- a/python/tvm/relay/analysis.py +++ b/python/tvm/relay/analysis.py @@ -384,7 +384,7 @@ def detect_feature(a, b=None): """ if isinstance(a, Module): a, b = b, a - return set([Feature(int(x)) for x in _analysis.detect_feature(a, b)]) + return {[Feature(int(x)) for x in _analysis.detect_feature(a, b)]} def structural_hash(value): diff --git a/python/tvm/relay/backend/_backend.py b/python/tvm/relay/backend/_backend.py index 1db70c3e2dc21..270c38e4f523d 100644 --- a/python/tvm/relay/backend/_backend.py +++ b/python/tvm/relay/backend/_backend.py @@ -44,8 +44,9 @@ def lower(sch, inputs, func_name, source_func): lowered_funcs : List[tvm.LoweredFunc] The result of lowering. """ + # pylint: disable=broad-except, import-outside-toplevel import traceback - # pylint: disable=broad-except + try: f = _build.lower(sch, inputs, name=func_name) # logging.debug("lower function %s", func_name) diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py index 172480da7286d..4eedd23faa1c3 100644 --- a/python/tvm/relay/backend/compile_engine.py +++ b/python/tvm/relay/backend/compile_engine.py @@ -86,7 +86,7 @@ def lower(self, source_func, target=None): cached_func: CachedFunc The result of lowering. 
""" - # pylint: disable=broad-except + # pylint: disable=broad-except, import-outside-toplevel try: key = _get_cache_key(source_func, target) return _backend._CompileEngineLower(self, key) diff --git a/python/tvm/relay/build_module.py b/python/tvm/relay/build_module.py index d848d9030c484..ea7a4cacfc60d 100644 --- a/python/tvm/relay/build_module.py +++ b/python/tvm/relay/build_module.py @@ -407,7 +407,6 @@ def create_executor(kind="debug", return _interpreter.Interpreter(mod, ctx, target) if kind == "graph": return GraphExecutor(mod, ctx, target) - elif kind == "vm": + if kind == "vm": return VMExecutor(mod, ctx, target) - else: - raise RuntimeError("unknown execution strategy: {0}".format(kind)) + raise RuntimeError("unknown execution strategy: {0}".format(kind)) diff --git a/python/tvm/relay/debug.py b/python/tvm/relay/debug.py index de18352d85a87..a2f3533a35641 100644 --- a/python/tvm/relay/debug.py +++ b/python/tvm/relay/debug.py @@ -20,7 +20,7 @@ from ..api import register_func -# pylint: disable=unused-argument +# pylint: disable=unused-argument, import-outside-toplevel def _debugger_init(expr, stack): import pdb pdb.set_trace() diff --git a/python/tvm/relay/expr.py b/python/tvm/relay/expr.py index 97185ee64bb3c..5add5e76a6802 100644 --- a/python/tvm/relay/expr.py +++ b/python/tvm/relay/expr.py @@ -125,8 +125,7 @@ def __sub__(self, other): def __rsub__(self, other): if isinstance(other, _Number): raise TypeError('convert "%s" with `const` first' % str(other)) - else: - raise TypeError("type %s not supported" % str(type(other))) + raise TypeError("type %s not supported" % str(type(other))) def __mul__(self, other): if isinstance(other, Expr): @@ -150,8 +149,7 @@ def __div__(self, other): def __rdiv__(self, other): if isinstance(other, _Number): raise TypeError('convert "%s" with `const` first' % str(other)) - else: - raise TypeError("type %s not supported" % str(type(other))) + raise TypeError("type %s not supported" % str(type(other))) def __truediv__(self, other): return self.__div__(other) diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index a010099c2bee4..566851d7f7ed9 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -401,6 +401,7 @@ def from_caffe2(self, init_net, predict_net): params : dict A dict of name: tvm.nd.array pairs, used as pretrained weights """ + # pylint: disable=import-outside-toplevel from caffe2.python import workspace workspace.RunNetOnce(init_net) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index bc5c0e4222fb4..a0af826de32be 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -302,7 +302,7 @@ def set_expr(self, name, expr, force_override=False): self.exprs[name] = expr def has_expr(self, name): - return True if name in self.exprs else False + return name in self.exprs def set_padding(self, paddings): self.paddings = paddings @@ -391,7 +391,7 @@ def __call__(self, inputs, attrs, *args): if k in self._excludes: raise NotImplementedError('Attribute %s in operator %s is not' + ' supported.', k, op_name) - elif k in self._disables: + if k in self._disables: logging.warning("Attribute %s is disabled in relay.sym.%s", k, op_name) elif k in self._ignores: if k != 'tvm_custom': @@ -485,6 +485,7 @@ def infer_value(input_val, params): portion of the relay graph. This is often needed for functions that whose output shape depends on the value of a tensor. 
""" + # pylint: disable=import-outside-toplevel from tvm.contrib import graph_runtime # Check that all free variables have associated parameters. assert all(var.name_hint in params.keys() for var in analysis.free_vars( diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index d07b3f45fe1aa..719a2783fd3b7 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -14,7 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements +# pylint: disable=invalid-name, import-self, unused-argument, unused-variable +# pylint: disable=inconsistent-return-statements, import-outside-toplevel """CoreML frontend.""" from __future__ import absolute_import as _abs import math @@ -111,14 +112,13 @@ def _BatchnormLayerParams(op, inexpr, etab): if op.instanceNormalization: raise tvm.error.OpNotImplemented( 'Operator "instance normalization" is not supported in frontend CoreML.') - else: - params = {'gamma':etab.new_const(list(op.gamma.floatValue)), - 'beta':etab.new_const(list(op.beta.floatValue)), - 'moving_mean':etab.new_const(list(op.mean.floatValue)), - 'moving_var': etab.new_const(list(op.variance.floatValue)), - 'epsilon': op.epsilon} - result, moving_mean, moving_var = _op.nn.batch_norm(data=inexpr, **params) - return result + params = {'gamma':etab.new_const(list(op.gamma.floatValue)), + 'beta':etab.new_const(list(op.beta.floatValue)), + 'moving_mean':etab.new_const(list(op.mean.floatValue)), + 'moving_var': etab.new_const(list(op.variance.floatValue)), + 'epsilon': op.epsilon} + result, moving_mean, moving_var = _op.nn.batch_norm(data=inexpr, **params) + return result def _ActivationParams(op, inexpr, etab): @@ -197,37 +197,36 @@ def _PoolingLayerParams(op, inexpr, etab): raise tvm.error.OpNotImplemented( 'Only Max and Average Pooling are supported in frontend CoreML.') - else: - params = {'pool_size':list(op.kernelSize), - 'strides':list(op.stride)} - - if op.WhichOneof('PoolingPaddingType') == 'valid': - valid = op.valid - if valid.paddingAmounts.borderAmounts: - assert len(valid.paddingAmounts.borderAmounts) == 2 - pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize - pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize - pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize - pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize - if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)): - params['padding'] = [pad_t, pad_l, pad_b, pad_r] - elif op.WhichOneof('PoolingPaddingType') == 'includeLastPixel': - # I don't know if this is correct - valid = op.includeLastPixel - padding = list(valid.paddingAmounts) - params['padding'] = padding - params['ceil_mode'] = True - else: - msg = 'PoolingPaddingType {} is not supported in operator Pooling.' 
-            op_name = op.WhichOneof('PoolingPaddingType')
-            raise tvm.error.OpAttributeUnImplemented(msg.format(op_name))
+    params = {'pool_size':list(op.kernelSize),
+              'strides':list(op.stride)}
 
-        if op.type == 0:
-            return _op.nn.max_pool2d(inexpr, **params)
-        if op.type == 1:
-            return _op.nn.avg_pool2d(inexpr, **params)
-        raise tvm.error.OpNotImplemented(
-            'Only Max and Average Pooling are supported in CoreML.')
+    if op.WhichOneof('PoolingPaddingType') == 'valid':
+        valid = op.valid
+        if valid.paddingAmounts.borderAmounts:
+            assert len(valid.paddingAmounts.borderAmounts) == 2
+            pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
+            pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
+            pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
+            pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
+            if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)):
+                params['padding'] = [pad_t, pad_l, pad_b, pad_r]
+    elif op.WhichOneof('PoolingPaddingType') == 'includeLastPixel':
+        # I don't know if this is correct
+        valid = op.includeLastPixel
+        padding = list(valid.paddingAmounts)
+        params['padding'] = padding
+        params['ceil_mode'] = True
+    else:
+        msg = 'PoolingPaddingType {} is not supported in operator Pooling.'
+        op_name = op.WhichOneof('PoolingPaddingType')
+        raise tvm.error.OpAttributeUnImplemented(msg.format(op_name))
+
+    if op.type == 0:
+        return _op.nn.max_pool2d(inexpr, **params)
+    if op.type == 1:
+        return _op.nn.avg_pool2d(inexpr, **params)
+    raise tvm.error.OpNotImplemented(
+        'Only Max and Average Pooling are supported in CoreML.')
 
 
 def _SoftmaxLayerParams(op, inexpr, etab):
@@ -297,10 +296,8 @@ def _PaddingLayerParams(op, inexpr, etab):
                                     (0, 0),
                                     (pad_t, pad_b),
                                     (pad_l, pad_r)))
-
-    else:
-        raise tvm.error.OpNotImplemented(
-            'Non-constant padding is not supported in frontend CoreML.')
+    raise tvm.error.OpNotImplemented(
+        'Non-constant padding is not supported in frontend CoreML.')
 
 
 def _PermuteLayerParams(op, inexpr, etab):

diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py
index 5458b517aafd4..740d600739068 100644
--- a/python/tvm/relay/frontend/keras.py
+++ b/python/tvm/relay/frontend/keras.py
@@ -14,9 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, import-self
+# pylint: disable=invalid-name, import-self, import-outside-toplevel
 """Keras frontend."""
-from __future__ import absolute_import as _abs
 import sys
 import numpy as np
 import tvm
@@ -133,7 +132,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab):
             #  f(x) = max_value, for x >= max_value
             #  f(x) = x,         for threshold <= x < max_value
             return _op.clip(inexpr, a_min=0., a_max=float(keras_layer.max_value))
-        elif keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
+        if keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
             # f(x) = negative_slope * (inexpr - threshold)
             negative_slope = _expr.const(keras_layer.negative_slope, dtype='float32')
             return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))

diff --git a/python/tvm/relay/frontend/nnvm_common.py b/python/tvm/relay/frontend/nnvm_common.py
index 5f24fa0a504e3..072c7ad3be395 100644
--- a/python/tvm/relay/frontend/nnvm_common.py
+++ b/python/tvm/relay/frontend/nnvm_common.py
@@ -16,15 +16,13 @@
 # under the License.
 # pylint: disable=invalid-name, import-self, len-as-condition
 """Utility functions common to NNVM and MxNet conversion."""
-from __future__ import absolute_import as _abs
-
+import warnings
 from .. import expr as _expr
 from .. import op as _op
 from .common import get_relay_op
 from .common import infer_type as _infer_type
 
 
 def _warn_not_used(attr, op='nnvm'):
-    import warnings
     err = "{} is ignored in {}.".format(attr, op)
     warnings.warn(err)

diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py
index ce7e01e96668d..e83ab1f729568 100644
--- a/python/tvm/relay/frontend/onnx.py
+++ b/python/tvm/relay/frontend/onnx.py
@@ -15,6 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
+# pylint: disable=import-outside-toplevel
 """ONNX: Open Neural Network Exchange frontend for Relay."""
 from __future__ import absolute_import as _abs

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 8f7f4e63ec814..3aeb1d4f3d6d7 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -16,6 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
+# pylint: disable=import-outside-toplevel
 """TF: Tensorflow frontend."""
 from __future__ import absolute_import as _abs
 from __future__ import print_function

diff --git a/python/tvm/relay/frontend/tensorflow_parser.py b/python/tvm/relay/frontend/tensorflow_parser.py
index d1c0bfe6a8474..943c24594a4d2 100644
--- a/python/tvm/relay/frontend/tensorflow_parser.py
+++ b/python/tvm/relay/frontend/tensorflow_parser.py
@@ -15,8 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 """TF: Tensorflow parser"""
-from __future__ import absolute_import as _abs
-from __future__ import print_function
+# pylint: disable=import-outside-toplevel, assignment-from-no-return
+
 import os
 from tvm.contrib import util

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index cefd4085b67ca..d3826b6ce52d9 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -14,7 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-argument, too-many-lines
+# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
+
 """Tensorflow lite frontend."""
 from __future__ import absolute_import as _abs
 import math
@@ -1458,8 +1459,7 @@ def convert_pool2d(self, op, pool_type):
                 raise tvm.error.OpNotImplemented(
                     'Operator {} with fused activation is not supported yet.'
                     .format('qnn.op.pool2d'))
-            else:
-                out = self.convert_fused_activation_function(out, fused_activation_fn)
+            out = self.convert_fused_activation_function(out, fused_activation_fn)
         return out
 
     def convert_pad(self, op):

diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py
index a089cab669c92..c2ec6ad2d22d7 100644
--- a/python/tvm/relay/op/__init__.py
+++ b/python/tvm/relay/op/__init__.py
@@ -46,6 +46,7 @@
 
 
 def _register_op_make():
+    # pylint: disable=import-outside-toplevel
     from . import _make
     from .. import expr
     expr._op_make = _make

diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py
index 9f32c250ce6f9..8a4cb2f632c65 100644
--- a/python/tvm/relay/op/_transform.py
+++ b/python/tvm/relay/op/_transform.py
@@ -200,13 +200,12 @@ def take_shape_func(attrs, inputs, out_ndims):
     """
     if attrs.axis is None:
         return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
-    else:
-        axis = get_const_int(attrs.axis)
-        data_ndim = int(inputs[0].shape[0])
-        if axis < 0:
-            axis += data_ndim
-        assert 0 <= axis < data_ndim
-        return [_take_with_axis_shape_func(*inputs, convert(axis), out_ndims[0])]
+    axis = get_const_int(attrs.axis)
+    data_ndim = int(inputs[0].shape[0])
+    if axis < 0:
+        axis += data_ndim
+    assert 0 <= axis < data_ndim
+    return [_take_with_axis_shape_func(*inputs, convert(axis), out_ndims[0])]
 
 @script
 def _argwhere_shape_func_1d(condition):
@@ -275,13 +274,13 @@ def argwhere_shape_func(attrs, inputs, out_ndims):
     """
     if len(inputs[0].shape) == 1:
         return [_argwhere_shape_func_1d(inputs[0])]
-    elif len(inputs[0].shape) == 2:
+    if len(inputs[0].shape) == 2:
         return [_argwhere_shape_func_2d(inputs[0])]
-    elif len(inputs[0].shape) == 3:
+    if len(inputs[0].shape) == 3:
         return [_argwhere_shape_func_3d(inputs[0])]
-    elif len(inputs[0].shape) == 4:
+    if len(inputs[0].shape) == 4:
         return [_argwhere_shape_func_4d(inputs[0])]
-    elif len(inputs[0].shape) == 5:
+    if len(inputs[0].shape) == 5:
         return [_argwhere_shape_func_5d(inputs[0])]
     return ValueError("Does not support rank higher than 5 in argwhere")

diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py
index 8189967a9d03d..fcd9e99d1440d 100644
--- a/python/tvm/relay/op/nn/_nn.py
+++ b/python/tvm/relay/op/nn/_nn.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments
+# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
 """Backend compiler related feature registration"""
 from __future__ import absolute_import
 
@@ -265,6 +265,7 @@ def schedule_conv2d(attrs, outs, target):
 @reg.register_alter_op_layout("nn.conv2d")
 def alter_op_layout_conv2d(attrs, inputs, tinfos):
     """Alternate the layout of conv2d"""
+    # pylint: disable=import-outside-toplevel
     from ... import op
     return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
 
@@ -309,7 +310,7 @@ def convert_conv2d(attrs, inputs, tinfos, desired_layout):
     result : tvm.relay.Expr
         The transformed expr
     """
-
+    # pylint: disable=import-outside-toplevel
     from tvm import relay
     data_layout = attrs['data_layout']
     kernel_layout = attrs['kernel_layout']

diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py
index 0595f75b0fe77..710d203eccc69 100644
--- a/python/tvm/relay/op/transform.py
+++ b/python/tvm/relay/op/transform.py
@@ -14,6 +14,8 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
+
+# pylint: disable=import-outside-toplevel
 """Transform operators."""
 from . import _make

diff --git a/python/tvm/relay/parser.py b/python/tvm/relay/parser.py
index 0244debe7a8ba..6c4e3131e3c26 100644
--- a/python/tvm/relay/parser.py
+++ b/python/tvm/relay/parser.py
@@ -22,6 +22,7 @@
 @register_func("relay.fromtext")
 def fromtext(data, source_name=None):
     """Parse a Relay program."""
+    # pylint: disable=import-outside-toplevel
     from tvm.relay import _parser
     x = _parser.fromtext(data + "\n", source_name)
     if x is None:

diff --git a/python/tvm/relay/qnn/op/op.py b/python/tvm/relay/qnn/op/op.py
index 505f0473d43f7..6da15ebb479eb 100644
--- a/python/tvm/relay/qnn/op/op.py
+++ b/python/tvm/relay/qnn/op/op.py
@@ -16,7 +16,7 @@
 # under the License.
 #pylint: disable=unused-argument
 """The register functions for the QNN dialect."""
-from tvm.relay.op.op import register as register
+from tvm.relay.op.op import register
 
 def register_qnn_legalize(op_name, legal_op=None, level=10):
     """Register legal transformation function for a QNN op

diff --git a/python/tvm/relay/quantize/_partition.py b/python/tvm/relay/quantize/_partition.py
index 1180d8360754b..c6a621db368a9 100644
--- a/python/tvm/relay/quantize/_partition.py
+++ b/python/tvm/relay/quantize/_partition.py
@@ -88,7 +88,7 @@ def add_partition_generic(ref_call, new_args, ctx):
         lhs = new_args[0].realize()
         rhs = new_args[1].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif not lhs_cond and rhs_cond:
+    if not lhs_cond and rhs_cond:
         # - introduced by residual connection in ResNet
         #     ...
         #     %13 = nn.conv2d(%12, %meta[relay.Constant])
@@ -104,7 +104,7 @@ def add_partition_generic(ref_call, new_args, ctx):
         #     ...
         rhs = new_args[1].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif lhs_cond and not rhs_cond:
+    if lhs_cond and not rhs_cond:
         if _analysis.check_constant(rhs):
             # - introduced by batch_norm: add(out, bias)
             return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
@@ -121,11 +121,11 @@ def add_partition_generic(ref_call, new_args, ctx):
         #     ...
         lhs = new_args[0].realize()
         return _forward_op(ref_call, [lhs, rhs])
-    elif not lhs_cond and not rhs_cond:
+    if not lhs_cond and not rhs_cond:
         # trivial case
         return None
-    else:
-        raise ValueError
+
+    raise ValueError
 
 
 # TODO(ziheng) enhance `register_partition_function` to dispatch

diff --git a/python/tvm/relay/quantize/quantize.py b/python/tvm/relay/quantize/quantize.py
index a9d877cecd519..be8a3a3233167 100644
--- a/python/tvm/relay/quantize/quantize.py
+++ b/python/tvm/relay/quantize/quantize.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-#pylint: disable=unused-argument
+#pylint: disable=unused-argument, not-context-manager
 """Automatic quantization toolkit."""
 from __future__ import absolute_import
 from . import _quantize

diff --git a/python/tvm/relay/scope_builder.py b/python/tvm/relay/scope_builder.py
index 43c653203c814..cd8dc8dcd3096 100644
--- a/python/tvm/relay/scope_builder.py
+++ b/python/tvm/relay/scope_builder.py
@@ -41,8 +41,7 @@ def __enter__(self):
     def __exit__(self, ptype, value, trace):
         if value:
             raise value
-        else:
-            self._exit_cb()
+        self._exit_cb()
 
 def _make_lets(bindings, ret_value):
     """Make a nested let expressions.

diff --git a/python/tvm/relay/testing/darknet.py b/python/tvm/relay/testing/darknet.py
index 091600db30362..ab94ecd6d2a94 100644
--- a/python/tvm/relay/testing/darknet.py
+++ b/python/tvm/relay/testing/darknet.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, unpacking-non-sequence
 """
 Compile DarkNet Models
 ====================

diff --git a/python/tvm/relay/testing/resnet.py b/python/tvm/relay/testing/resnet.py
index a8e369b740219..bde788e1f9b9d 100644
--- a/python/tvm/relay/testing/resnet.py
+++ b/python/tvm/relay/testing/resnet.py
@@ -85,24 +85,25 @@ def residual_unit(data,
                 data=act1, channels=num_filter, kernel_size=(1, 1),
                 strides=stride, name=name+'_sc')
         return relay.add(conv3, shortcut)
+
+    bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + '_bn1')
+    act1 = relay.nn.relu(data=bn1)
+    conv1 = layers.conv2d(
+        data=act1, channels=num_filter, kernel_size=(3, 3),
+        strides=stride, padding=(1, 1), name=name + '_conv1')
+    bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + '_bn2')
+    act2 = relay.nn.relu(data=bn2)
+    conv2 = layers.conv2d(
+        data=act2, channels=num_filter, kernel_size=(3, 3),
+        strides=(1, 1), padding=(1, 1), name=name + '_conv2')
+
+    if dim_match:
+        shortcut = data
     else:
-        bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + '_bn1')
-        act1 = relay.nn.relu(data=bn1)
-        conv1 = layers.conv2d(
-            data=act1, channels=num_filter, kernel_size=(3, 3),
-            strides=stride, padding=(1, 1), name=name + '_conv1')
-        bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + '_bn2')
-        act2 = relay.nn.relu(data=bn2)
-        conv2 = layers.conv2d(
-            data=act2, channels=num_filter, kernel_size=(3, 3),
-            strides=(1, 1), padding=(1, 1), name=name + '_conv2')
-        if dim_match:
-            shortcut = data
-        else:
-            shortcut = layers.conv2d(
-                data=act1, channels=num_filter, kernel_size=(1, 1),
-                strides=stride, name=name+'_sc')
-        return relay.add(conv2, shortcut)
+        shortcut = layers.conv2d(
+            data=act1, channels=num_filter, kernel_size=(1, 1),
+            strides=stride, name=name+'_sc')
+    return relay.add(conv2, shortcut)
 
 
 def resnet(units,

diff --git a/python/tvm/relay/testing/tf.py b/python/tvm/relay/testing/tf.py
index e3d6e7df0b98b..1dbbf14b41f34 100644
--- a/python/tvm/relay/testing/tf.py
+++ b/python/tvm/relay/testing/tf.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, import-outside-toplevel
 """
 Tensorflow Model Helpers
 ========================
@@ -346,7 +346,7 @@ def get_workload_ptb():
     sample_data_file = 'simple-examples.tgz'
     sample_url = sample_repo+sample_data_file
     ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'
-
+    # pylint: disable=import-outside-toplevel
     import tarfile
     file_path = download_testdata(sample_url, sample_data_file, module=['data', 'ptb_data'])
     dir_path = os.path.dirname(file_path)

diff --git a/python/tvm/relay/testing/yolo_detection.py b/python/tvm/relay/testing/yolo_detection.py
index bdf9efe62de42..d0a675f17a90a 100644
--- a/python/tvm/relay/testing/yolo_detection.py
+++ b/python/tvm/relay/testing/yolo_detection.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-init,
 """
 Yolo detection boxes helper functions
 ====================
@@ -224,6 +224,7 @@ def _draw_label(im, r, c, label, rgb):
             _set_pixel(im, i+c, j+r, k, val)#rgb[k] * val)
 
 def _get_label(font_path, labelstr, rgb):
+    # pylint: disable=import-outside-toplevel
     from PIL import Image
     from PIL import ImageDraw
     from PIL import ImageFont

diff --git a/python/tvm/rpc/proxy.py b/python/tvm/rpc/proxy.py
index 02d109c18738f..c3a3647948eea 100644
--- a/python/tvm/rpc/proxy.py
+++ b/python/tvm/rpc/proxy.py
@@ -508,8 +508,7 @@ def __init__(self,
             except socket.error as sock_err:
                 if sock_err.errno in [98, 48]:
                     continue
-                else:
-                    raise sock_err
+                raise sock_err
        if not self.port:
            raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
        logging.info("RPCProxy: client port bind to %s:%d", host, self.port)
@@ -569,7 +568,7 @@ def _connect(key):
        magic = struct.unpack('

diff --git a/topi/python/topi/cuda/softmax.py b/topi/python/topi/cuda/softmax.py
--- a/topi/python/topi/cuda/softmax.py
+++ b/topi/python/topi/cuda/softmax.py
     if len(softmax.shape) > 2:
         ops = [max_elem.op, expsum.op, softmax.op]
-        if exp != None:
+        if exp is not None:
             ops.append(exp.op)
-
+
         for op in ops:
             s = schedule_injective_from_existing(s, op.output(0))
     else:
@@ -64,7 +64,7 @@
     block_x = tvm.thread_axis("blockIdx.x")
     thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
 
-    if exp != None:
+    if exp is not None:
         s[exp].bind(exp.op.axis[0], block_x)
 
     s[max_elem].bind(max_elem.op.axis[0], block_x)

diff --git a/topi/python/topi/cuda/sort.py b/topi/python/topi/cuda/sort.py
index b02c14b47e60d..0e7a23eb14ab1 100644
--- a/topi/python/topi/cuda/sort.py
+++ b/topi/python/topi/cuda/sort.py
@@ -42,6 +42,7 @@ def _schedule_sort(outs):
     outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
     s = tvm.create_schedule([x.op for x in outs])
     scheduled_ops = []
+    # pylint: disable=import-outside-toplevel
     from .injective import schedule_injective_from_existing
     def traverse(op):
         if tag.is_injective(op.tag):

diff --git a/topi/python/topi/cuda/vision.py b/topi/python/topi/cuda/vision.py
index 3a90402707adf..2df273ff50e3d 100644
--- a/topi/python/topi/cuda/vision.py
+++ b/topi/python/topi/cuda/vision.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument, no-member
+# pylint: disable=invalid-name, unused-variable, unused-argument, no-member, import-outside-toplevel
 """Schedule for vision operators"""
 from __future__ import absolute_import as _abs
 import tvm

diff --git a/topi/python/topi/hls/nn.py b/topi/python/topi/hls/nn.py
index 6b925c8659471..d73cb9c847f75 100644
--- a/topi/python/topi/hls/nn.py
+++ b/topi/python/topi/hls/nn.py
@@ -275,7 +275,7 @@ def schedule_softmax(outs):
         raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
 Got {0}'.format(op_tag))
 
-    if exp != None:
+    if exp is not None:
         s[exp].compute_at(s[softmax], s[softmax].op.axis[1])
 
     s[expsum].compute_at(s[softmax], s[softmax].op.axis[1])

diff --git a/topi/python/topi/intel_graphics/conv2d.py b/topi/python/topi/intel_graphics/conv2d.py
index 128db22fa4849..4d1a4b3370dbf 100644
--- a/topi/python/topi/intel_graphics/conv2d.py
+++ b/topi/python/topi/intel_graphics/conv2d.py
@@ -38,17 +38,17 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False):
     if is_depthwise:
         raise RuntimeError("Depthwise not supported for intel graphics.")
-    else:
-        batch_size, in_channel, height, width = get_const_tuple(data.shape)
-        out_channel, _, hkernel, _ = get_const_tuple(kernel.shape)
-        HSTR, _ = strides
-
-        ic_bn = 1
-        oc_bn, oc_bn_upper = 16, 16
-        for i in range(oc_bn_upper, 0, -1):
-            if out_channel % i == 0:
-                oc_bn = i
-                break
+
+    batch_size, in_channel, height, width = get_const_tuple(data.shape)
+    out_channel, _, hkernel, _ = get_const_tuple(kernel.shape)
+    HSTR, _ = strides
+
+    ic_bn = 1
+    oc_bn, oc_bn_upper = 16, 16
+    for i in range(oc_bn_upper, 0, -1):
+        if out_channel % i == 0:
+            oc_bn = i
+            break
 
     if HSTR == 2:
         if out_channel + hkernel == 515:
@@ -189,7 +189,7 @@ def __topi_nn_conv2d_NCHWc(*args, **kwargs):
 
 @conv2d_alter_layout.register(["intel_graphics"])
 def _alter_conv2d_layout(attrs, inputs, tinfo, F):
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
     new_attrs = {k : attrs[k] for k in attrs.keys()}
 
     if F.__name__ == 'tvm.relay.op':

diff --git a/topi/python/topi/nn/bitserial_util.py b/topi/python/topi/nn/bitserial_util.py
index 09a301f7c962d..def5b5e2e1935 100644
--- a/topi/python/topi/nn/bitserial_util.py
+++ b/topi/python/topi/nn/bitserial_util.py
@@ -60,7 +60,7 @@ def _bitpack(*indices):
         for i in range(n+1):
             if i == bit_axis:
                 continue
-            elif i == pack_axis:
+            if i == pack_axis:
                 idx[j] = indices[i] * data_width + k
             else:
                 idx[j] = indices[i]
@@ -88,4 +88,3 @@ def binary_op_multiplier(pack_dtype):
     pack_dtype: string
         pack type for the operator (must be a uint)"""
     return int(pack_dtype[4:])
-    
\ No newline at end of file

diff --git a/topi/python/topi/nn/conv2d.py b/topi/python/topi/nn/conv2d.py
index 664a293d56ae7..046c48e7d87cd 100644
--- a/topi/python/topi/nn/conv2d.py
+++ b/topi/python/topi/nn/conv2d.py
@@ -66,9 +66,9 @@ def conv2d(input, filter, strides, padding, dilation, layout='NCHW', out_dtype=N
     # default declaration
     if layout == 'NCHW':
         return conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
-    elif layout == 'HWCN':
+    if layout == 'HWCN':
         return conv2d_hwcn(input, filter, strides, padding, dilation, out_dtype)
-    elif layout == 'NHWC':
+    if layout == 'NHWC':
         return conv2d_nhwc(input, filter, strides, padding, dilation, out_dtype)
     raise ValueError("not support this layout {} yet".format(layout))
 
@@ -764,6 +764,7 @@ def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_d
     output : tvm.Tensor
         4-D with shape [alpha, alpha, CO, CI]
     """
+    # pylint: disable=import-outside-toplevel
     from tvm.contrib import nnpack
     return nnpack.convolution_inference_weight_transform(
         kernel, algorithm=convolution_algorithm, dtype=out_dtype)

diff --git a/topi/python/topi/nn/fifo_buffer.py b/topi/python/topi/nn/fifo_buffer.py
index 23503a2f3e873..946b8d1e31807 100644
--- a/topi/python/topi/nn/fifo_buffer.py
+++ b/topi/python/topi/nn/fifo_buffer.py
@@ -76,7 +76,7 @@ def fifo_buffer(data, buffer, axis):
                                          buffer[i + data_size],
                                          data[i - buflen + data_size]),
                            name='new_buffer')
-    elif len(buffer.shape) == 2:
+    if len(buffer.shape) == 2:
         if axis == 0:
             return tvm.compute(buffer.shape,
                                lambda i, j:

diff --git a/topi/python/topi/opengl/softmax.py b/topi/python/topi/opengl/softmax.py
index 96218e04e60ba..e343d4513241c 100644
--- a/topi/python/topi/opengl/softmax.py
+++ b/topi/python/topi/opengl/softmax.py
@@ -51,7 +51,7 @@ def schedule_softmax(outs):
         raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
 Got {0}'.format(op_tag))
 
-    if exp != None:
+    if exp is not None:
         s[exp].opengl()
 
     s[max_elem].opengl()

diff --git a/topi/python/topi/testing/one_hot.py b/topi/python/topi/testing/one_hot.py
index 99c52be65c749..05834e3a73034 100644
--- a/topi/python/topi/testing/one_hot.py
+++ b/topi/python/topi/testing/one_hot.py
@@ -62,7 +62,7 @@ def one_hot(indices, on_value, off_value, depth, axis, dtype):
             indices_index += 1
 
     out = np.empty(oshape)
-    output_indices = [index for index in np.ndindex(out.shape)]
+    output_indices = list(np.ndindex(out.shape))
     for output_index in output_indices:
         indices_indices = []
         for i, out_idx in enumerate(output_index):

diff --git a/topi/python/topi/transform.py b/topi/python/topi/transform.py
index 41bf2e893b4dd..bdeb22304b076 100644
--- a/topi/python/topi/transform.py
+++ b/topi/python/topi/transform.py
@@ -238,13 +238,10 @@ def _select(*indices):
         from_val = []
         index_tuple = []
         for i in range(n):
-            from_val.append(
-                within_index(begin[i], end[i], strides[i], indices[i]))
+            from_val.append(within_index(begin[i], end[i], strides[i], indices[i]))
             index_tuple.append(
                 make_idx(begin[i], end[i], strides[i], a.shape[i], indices[i]))
-        return tvm.if_then_else(tvm.all(*from_val),
-                                v(*index_tuple),
-                                a(*indices))
+        return tvm.if_then_else(tvm.all(*from_val), v(*index_tuple), a(*indices))
 
     return tvm.compute(a.shape, _select, name="strided_set")
 
@@ -568,7 +565,7 @@ def sequence_mask(data, valid_length, mask_value=0, axis=0):
 
     assert len(data.shape) >= 2,\
         "only support data.ndim >= 2, received data.shape = {}".format(data.shape)
-    assert axis == 0 or axis == 1, "only support axis = 0, 1, received axis = {}".format(axis)
+    assert axis in (0, 1), "only support axis = 0, 1, received axis = {}".format(axis)
     return cpp.sequence_mask(data, valid_length, mask_value, axis)

diff --git a/topi/python/topi/util.py b/topi/python/topi/util.py
index 4c4aabfad0c15..f28bf012c97c2 100644
--- a/topi/python/topi/util.py
+++ b/topi/python/topi/util.py
@@ -25,7 +25,6 @@ class InvalidShapeError(ValueError):
     """Invalid shape for a topi function. i.e. call winograd template for non-3x3 kernel)"""
-    pass
 
 def nchw_pack_layout(layout_info):
     """Check whether the layout type is NCHWinic"""
@@ -350,7 +349,7 @@ def get_shape(src_shape, src_layout, dst_layout):
 
     layout_mapping = bijective_layout(src_layout, dst_layout)
     dst_indices = layout_mapping.forward_index(
-        tvm.convert([i for i in range(len(src_layout))]))
+        tvm.convert(list(range(len(src_layout)))))
 
     return get_const_tuple(tuple([src_shape[i.value] for i in dst_indices]))

diff --git a/topi/python/topi/vision/rcnn/proposal.py b/topi/python/topi/vision/rcnn/proposal.py
index 507d464e081bb..d48c89078ec0d 100644
--- a/topi/python/topi/vision/rcnn/proposal.py
+++ b/topi/python/topi/vision/rcnn/proposal.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name, singleton-comparison
+# pylint: disable=invalid-name, singleton-comparison, bad-continuation
 """Proposal operator"""
 import math
 import tvm
@@ -303,7 +303,7 @@ def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
     with ib.for_range(0, batch) as b:
         with ib.if_scope(nkeep[b] > 0):
             with ib.for_range(0, tvm.ceil(
-                tvm.const(rpn_post_nms_top_n, 'float32') / nkeep[b]).astype('int32')):
+                    tvm.const(rpn_post_nms_top_n, 'float32') / nkeep[b]).astype('int32')):
                 with ib.for_range(0, num_bbox) as j:
                     offset_j = (b * num_bbox + j) * 5
                     offset_i = (b * rpn_post_nms_top_n + i[b]) * 5

diff --git a/topi/python/topi/x86/conv2d.py b/topi/python/topi/x86/conv2d.py
index 8a6b57eb9e66b..1ba4f68be6c4c 100644
--- a/topi/python/topi/x86/conv2d.py
+++ b/topi/python/topi/x86/conv2d.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member,import-outside-toplevel
 """Conv2D schedule on x86"""
 
 import logging
@@ -126,7 +126,7 @@ def _declaration_conv(cfg, data, kernel, strides, padding, dilation, layout, out
     #     # specialize for INT8 1X1 conv on X86
     #     return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides,
     #                                                       padding, dilation, out_dtype)
-    elif layout == 'NHWC':
+    if layout == 'NHWC':
         return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
     raise ValueError("not support this layout {} yet".format(layout))

diff --git a/topi/python/topi/x86/conv2d_alter_op.py b/topi/python/topi/x86/conv2d_alter_op.py
index e4bc3cc5f74f7..cd612c34e5a20 100644
--- a/topi/python/topi/x86/conv2d_alter_op.py
+++ b/topi/python/topi/x86/conv2d_alter_op.py
@@ -63,7 +63,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfo, F):
     is_depthwise = groups == kshape[0] and kshape[1] == 1
 
     # Save the input exprs.
-    copy_inputs = [s for s in inputs]
+    copy_inputs = list(inputs)
 
     # Set the new attrs
     new_attrs = {k : attrs[k] for k in attrs.keys()}

diff --git a/topi/python/topi/x86/conv2d_int8.py b/topi/python/topi/x86/conv2d_int8.py
index 79527a747d0bd..1701643844e10 100644
--- a/topi/python/topi/x86/conv2d_int8.py
+++ b/topi/python/topi/x86/conv2d_int8.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
+# pylint: disable=invalid-name,unused-variable,unused-argument,no-member, import-outside-toplevel
 """Conv2D int8 schedule on x86"""
 
 import re
@@ -70,7 +70,7 @@ def _is_int8_hw_support(data_dtype, kernel_dtype):
     # 3) Check target
     mcpu = tvm.target.current_target().mcpu
     is_target_support = False
-    if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+    if mcpu in ('skylake-avx512', 'cascadelake'):
         is_target_support = True
 
     return is_dtype_support and is_llvm_support and is_target_support
diff --git a/topi/python/topi/x86/nn.py b/topi/python/topi/x86/nn.py
index 8e506da1d8bfc..45cb17e5c7b33 100644
--- a/topi/python/topi/x86/nn.py
+++ b/topi/python/topi/x86/nn.py
@@ -63,7 +63,7 @@ def schedule_softmax(outs):
     s[max_elem].compute_at(s[softmax], fused_outer_axes)
     s[expsum].compute_at(s[softmax], fused_outer_axes)
 
-    if exp != None:
+    if exp is not None:
         s[exp].compute_at(s[softmax], fused_outer_axes)
 
     return s
diff --git a/topi/python/topi/x86/util.py b/topi/python/topi/x86/util.py
index 00f297e4307f2..aff37aa362025 100644
--- a/topi/python/topi/x86/util.py
+++ b/topi/python/topi/x86/util.py
@@ -21,6 +21,6 @@ def get_fp32_len():
     mcpu = tvm.target.current_target().mcpu
     fp32_vec_len = 8
-    if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
+    if mcpu in ('skylake-avx512', 'cascadelake'):
         fp32_vec_len = 16
     return fp32_vec_len
diff --git a/vta/python/vta/bitstream.py b/vta/python/vta/bitstream.py
index b3d7df49328ed..3ee39ca0cb88c 100644
--- a/vta/python/vta/bitstream.py
+++ b/vta/python/vta/bitstream.py
@@ -79,11 +79,10 @@ def download_bitstream():
 $VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
 settings. You can see the list of available bitstreams under {}"
                 .format(url, BITSTREAM_URL))
-        else:
-            raise RuntimeError(
-                # This could happen when trying to access the URL behind a proxy
-                "Something went wrong when trying to access {}. Check your \
+        raise RuntimeError(
+            # This could happen when trying to access the URL behind a proxy
+            "Something went wrong when trying to access {}. Check your \
 internet connection or proxy settings."
-                .format(url))
+            .format(url))
 
     return success
diff --git a/vta/python/vta/environment.py b/vta/python/vta/environment.py
index 3a3323ed8493c..83db6121ed558 100644
--- a/vta/python/vta/environment.py
+++ b/vta/python/vta/environment.py
@@ -231,9 +231,9 @@ def target_host(self):
         """The target host"""
         if self.TARGET in ["pynq", "de10nano"]:
             return "llvm -target=armv7-none-linux-gnueabihf"
-        elif self.TARGET == "ultra96":
+        if self.TARGET == "ultra96":
            return "llvm -target=aarch64-linux-gnu"
-        elif self.TARGET in ["sim", "tsim"]:
+        if self.TARGET in ["sim", "tsim"]:
            return "llvm"
        raise ValueError("Unknown target %s" % self.TARGET)
diff --git a/vta/python/vta/exec/rpc_server.py b/vta/python/vta/exec/rpc_server.py
index be9d91a006de0..5586323061115 100644
--- a/vta/python/vta/exec/rpc_server.py
+++ b/vta/python/vta/exec/rpc_server.py
@@ -66,6 +66,7 @@ def ext_dev_callback():
 
     @tvm.register_func("tvm.contrib.vta.init", override=True)
     def program_fpga(file_name):
+        # pylint: disable=import-outside-toplevel
         from pynq import xlnk
         # Reset xilinx driver
         xlnk.Xlnk().xlnk_reset()
diff --git a/vta/python/vta/ir_pass.py b/vta/python/vta/ir_pass.py
index dbce9a7b91022..e42e3a0751dd5 100644
--- a/vta/python/vta/ir_pass.py
+++ b/vta/python/vta/ir_pass.py
@@ -15,9 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
"""Additional IR Pass for VTA""" -# pylint: disable=len-as-condition -from __future__ import absolute_import as _abs - +# pylint: disable=len-as-condition, no-else-return import tvm from topi import util diff --git a/vta/python/vta/program_bitstream.py b/vta/python/vta/program_bitstream.py index e735a4cf252c5..7d2c4e38db3e1 100644 --- a/vta/python/vta/program_bitstream.py +++ b/vta/python/vta/program_bitstream.py @@ -43,6 +43,7 @@ def main(): bitstream_program(args.target, args.bitstream) def pynq_bitstream_program(bitstream_path): + # pylint: disable=import-outside-toplevel from pynq import Bitstream bitstream = Bitstream(bitstream_path) bitstream.download() diff --git a/vta/python/vta/top/graphpack.py b/vta/python/vta/top/graphpack.py index ba139a8b5ace3..b14f937b35df0 100644 --- a/vta/python/vta/top/graphpack.py +++ b/vta/python/vta/top/graphpack.py @@ -151,14 +151,12 @@ def visit_call(self, call): assert not self.start_pack self.start_pack = True return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor) - elif call.op == self.bitpack_end: + if call.op == self.bitpack_end: if self.start_pack: self.start_pack = False data = args[0] data_shape = _get_shape(call.args[0]) return _unpack_batch_channel(data, data_shape) - else: - pass if self.start_pack: # Operator cases if call.op == self.conv2d and odtype == 'int32': @@ -188,7 +186,8 @@ def visit_call(self, call): kernel_layout=kernel_layout, out_dtype=call.attrs.out_dtype) return conv2d - elif call.op == self.conv2d_transpose and odtype == 'int32': + + if call.op == self.conv2d_transpose and odtype == 'int32': self.number_of_conv2d += 1 assert 8 % self.weight_bits == 0 w_lanes = 8 // self.weight_bits @@ -213,7 +212,7 @@ def visit_call(self, call): output_padding=call.attrs.output_padding, out_dtype=call.attrs.out_dtype) return conv2d - elif call.op == self.add and \ + if call.op == self.add and \ tuple(input_types[0].shape) == tuple(input_types[1].shape): pass elif call.op == self.add and len(input_types[1].shape) == 3: @@ -272,7 +271,7 @@ def _recursion(anf, start_found, stop_found, operator_current_idx): _recursion(anf.body, start_found, stop_found, operator_current_idx), anf.ret_type, anf.type_params, anf.attrs) - elif isinstance(anf, relay.expr.Let): + if isinstance(anf, relay.expr.Let): value = anf.value if isinstance(value, relay.expr.Call): if isinstance(value.op, relay.op.Op): diff --git a/vta/python/vta/top/op.py b/vta/python/vta/top/op.py index 6aca07ed655c7..ae77f00fb8a9f 100644 --- a/vta/python/vta/top/op.py +++ b/vta/python/vta/top/op.py @@ -127,10 +127,9 @@ def compute_conv2d_transpose(attrs, inputs, output_type, target): if is_packed_layout(layout): return [topi.nn.conv2d_transpose_nchw( inputs[0], inputs[1], strides, padding, out_dtype)] - else: - # If it's not packed, run on ARM CPU - with tvm.target.arm_cpu(tvm.target.current_target().model): - return _nn.compute_conv2d_transpose(attrs, inputs, output_type, target) + # If it's not packed, run on ARM CPU + with tvm.target.arm_cpu(tvm.target.current_target().model): + return _nn.compute_conv2d_transpose(attrs, inputs, output_type, target) # If VTA is not the target, default to _nn def return _nn.compute_conv2d_transpose(attrs, inputs, output_type, target) @@ -145,10 +144,9 @@ def schedule_conv2d_transpose(attrs, outputs, target): if target.device_name == "vta": if is_packed_layout(layout): return topi.nn.schedule_conv2d_transpose_nchw(outputs) - else: - # If it's not packed, run on ARM CPU - with tvm.target.arm_cpu(tvm.target.current_target().model): - return 
+        # If it's not packed, run on ARM CPU
+        with tvm.target.arm_cpu(tvm.target.current_target().model):
+            return _nn.schedule_conv2d_transpose(attrs, outputs, tvm.target.current_target())
 
     # If VTA is not the target, default to _nn def
     return _nn.schedule_conv2d_transpose(attrs, outputs, tvm.target.current_target())
diff --git a/vta/scripts/tune_conv2d.py b/vta/scripts/tune_conv2d.py
index 87f790932b91c..2780f26ca57cc 100644
--- a/vta/scripts/tune_conv2d.py
+++ b/vta/scripts/tune_conv2d.py
@@ -23,7 +23,6 @@
 
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing
diff --git a/vta/scripts/tune_conv2d_transpose.py b/vta/scripts/tune_conv2d_transpose.py
index 3e51d410638b2..f779b76f82775 100644
--- a/vta/scripts/tune_conv2d_transpose.py
+++ b/vta/scripts/tune_conv2d_transpose.py
@@ -23,7 +23,6 @@
 
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing
diff --git a/vta/scripts/tune_dense.py b/vta/scripts/tune_dense.py
index 7b7ff5bd77b34..7813b00fc878a 100644
--- a/vta/scripts/tune_dense.py
+++ b/vta/scripts/tune_dense.py
@@ -23,7 +23,6 @@
 
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing
diff --git a/vta/scripts/tune_group_conv2d.py b/vta/scripts/tune_group_conv2d.py
index 6a542ddd39161..c578090e26aa3 100644
--- a/vta/scripts/tune_group_conv2d.py
+++ b/vta/scripts/tune_group_conv2d.py
@@ -23,7 +23,6 @@
 
 import tvm
 from tvm import autotvm
-from tvm.contrib.util import get_lower_ir
 import topi
 import vta
 import vta.testing
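
The bulk of this patch mechanically applies a handful of pylint 2.4.4 checks: singleton-comparison (`x != None` becomes `x is not None`), consider-using-in (chained `==` tests become a tuple membership test), no-else-return (`elif`/`else` after a `return` flattens to a plain `if`), and import-outside-toplevel (suppressed with a local disable where the late import is deliberate, e.g. the optional pynq dependency). A minimal standalone sketch of the before/after idioms follows; the function names are hypothetical and not taken from the patch.

# Hypothetical sketch of the pylint 2.4.4 idioms this patch applies.

# singleton-comparison: compare against None with `is` / `is not`.
def scale(exp=None):
    if exp is not None:          # was: if exp != None:
        return 2 * exp
    return 1

# consider-using-in: collapse chained equality checks into a membership test.
def vector_width(mcpu):
    if mcpu in ('skylake-avx512', 'cascadelake'):   # was: mcpu == 'a' or mcpu == 'b'
        return 16
    return 8

# no-else-return: after a return, `elif`/`else` is redundant; use a flat `if`.
def target_host(target):
    if target == "ultra96":
        return "llvm -target=aarch64-linux-gnu"
    if target in ("sim", "tsim"):                   # was: elif self.TARGET in [...]
        return "llvm"
    raise ValueError("Unknown target %s" % target)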