From 84359a97bdb896ddd2a74c65631a00da1ec5ec70 Mon Sep 17 00:00:00 2001
From: Masahiro Hiramori <8973217+mshr-h@users.noreply.github.com>
Date: Tue, 23 Feb 2021 07:45:35 +0900
Subject: [PATCH] Fixed minor misspelling (#7499)

Co-authored-by: mshr-h
---
 include/tvm/ir/attrs.h                          | 2 +-
 include/tvm/runtime/packed_func.h               | 2 +-
 include/tvm/topi/einsum.h                       | 2 +-
 nnvm/src/core/symbolic.cc                       | 4 ++--
 python/tvm/micro/contrib/zephyr.py              | 2 +-
 python/tvm/relay/frontend/coreml.py             | 2 +-
 python/tvm/relay/testing/resnet.py              | 2 +-
 python/tvm/relay/testing/resnet_3d.py           | 2 +-
 python/tvm/relay/transform/transform.py         | 2 +-
 python/tvm/tir/stmt.py                          | 2 +-
 python/tvm/tir/transform/function_pass.py       | 2 +-
 python/tvm/topi/arm_cpu/depthwise_conv2d.py     | 2 +-
 python/tvm/topi/cuda/batch_matmul_tensorcore.py | 2 +-
 python/tvm/topi/cuda/rcnn/proposal.py           | 2 +-
 python/tvm/topi/nn/batch_matmul.py              | 2 +-
 python/tvm/topi/random/kernel.py                | 2 +-
 python/tvm/topi/testing/strided_slice_python.py | 4 ++--
 python/tvm/topi/utils.py                        | 2 +-
 python/tvm/topi/vision/rcnn/proposal.py         | 2 +-
 python/tvm/topi/x86/batch_matmul.py             | 6 +++---
 python/tvm/topi/x86/conv2d_avx_1x1.py           | 2 +-
 src/relay/ir/dataflow_matcher.cc                | 2 +-
 src/relay/ir/indexed_graph.cc                   | 4 ++--
 src/relay/transforms/partition_graph.cc         | 2 +-
 src/runtime/c_runtime_api.cc                    | 2 +-
 src/tir/transforms/hoist_if_then_else.cc        | 2 +-
 tests/python/frontend/mxnet/model_zoo/resnet.py | 2 +-
 tests/python/frontend/onnx/test_forward.py      | 2 +-
 28 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/include/tvm/ir/attrs.h b/include/tvm/ir/attrs.h
index 13bfd715cdfb..f05ab04c3305 100644
--- a/include/tvm/ir/attrs.h
+++ b/include/tvm/ir/attrs.h
@@ -146,7 +146,7 @@ class BaseAttrsNode : public Object {
   virtual void VisitAttrs(AttrVisitor* v) {}
   /*!
    * \brief Initialize the attributes by sequence of arguments
-   * \param args The postional arguments in the form
+   * \param args The positional arguments in the form
    *        [key0, value0, key1, value1, ..., key_n, value_n]
    */
   template <typename... Args>
diff --git a/include/tvm/runtime/packed_func.h b/include/tvm/runtime/packed_func.h
index e43e042866ff..cf30923aacb0 100644
--- a/include/tvm/runtime/packed_func.h
+++ b/include/tvm/runtime/packed_func.h
@@ -1204,7 +1204,7 @@ struct func_signature_helper {
 
 /*!
  * \brief template class to get function signature of a function or functor.
- * \tparam T The funtion/functor type.
+ * \tparam T The function/functor type.
  */
 template <typename T>
 struct function_signature {
diff --git a/include/tvm/topi/einsum.h b/include/tvm/topi/einsum.h
index e1baadab09d3..a0c4039909ad 100644
--- a/include/tvm/topi/einsum.h
+++ b/include/tvm/topi/einsum.h
@@ -439,7 +439,7 @@ inline std::vector<std::string> Split(const std::string& str, const std::string
  * \param subscripts input subscripts.
  * \param operands operand tensors.
  *
- * \return vector of strings, vector[0] represents the input part, vector[1] represents the ouput.
+ * \return vector of strings, vector[0] represents the input part, vector[1] represents the output.
  * if no output, the vector[1] is NULL.
* "ab, bc -> ac" => ["ab,bc", "ac"] */ diff --git a/nnvm/src/core/symbolic.cc b/nnvm/src/core/symbolic.cc index 12b8675d0bd7..48f834b28535 100644 --- a/nnvm/src/core/symbolic.cc +++ b/nnvm/src/core/symbolic.cc @@ -240,7 +240,7 @@ std::vector Symbol::ListInputNames(ListInputOption option) const { } std::vector Symbol::ListOutputNames() const { - static auto& flist_ouputs = Op::GetAttr("FListOutputNames"); + static auto& flist_outputs = Op::GetAttr("FListOutputNames"); std::vector ret; ret.reserve(outputs.size()); @@ -250,7 +250,7 @@ std::vector Symbol::ListOutputNames() const { } else { const std::string& hname = head.node->attrs.name; std::string rname; - FListOutputNames fn = flist_ouputs.get(head.node->op(), nullptr); + FListOutputNames fn = flist_outputs.get(head.node->op(), nullptr); if (fn != nullptr) { rname = fn(head.node->attrs)[head.index]; } else { diff --git a/python/tvm/micro/contrib/zephyr.py b/python/tvm/micro/contrib/zephyr.py index 29bb5ecdbe6f..cd9c23cd2f9d 100644 --- a/python/tvm/micro/contrib/zephyr.py +++ b/python/tvm/micro/contrib/zephyr.py @@ -537,7 +537,7 @@ class QemuStartupFailureError(Exception): class QemuFdTransport(file_descriptor.FdTransport): - """An FdTransport subclass that escapes written data to accomodate the QEMU monitor. + """An FdTransport subclass that escapes written data to accommodate the QEMU monitor. It's supposedly possible to disable the monitor, but Zephyr controls most of the command-line arguments for QEMU and there are too many options which implictly enable the monitor, so this diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index 4efe014b9ffd..f850750fad51 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -524,7 +524,7 @@ def coreml_op_to_relay(op, inname, outnames, etab): outname = outnames if isinstance(outnames, _base.string_types) else outnames[0] etab.set_expr(outname, outs, force_override=True) else: - # the number of ouputs from model op and tvm relay must be same + # the number of outputs from model op and tvm relay must be same assert len(outnames) == len(outs) for outname, out in zip(outnames, outs): etab.set_expr(outname, out, force_override=True) diff --git a/python/tvm/relay/testing/resnet.py b/python/tvm/relay/testing/resnet.py index bc5f5c4eed3e..b35e01f6779b 100644 --- a/python/tvm/relay/testing/resnet.py +++ b/python/tvm/relay/testing/resnet.py @@ -177,7 +177,7 @@ def resnet( Channel size of each stage num_classes : int - Ouput size of symbol + Output size of symbol data_shape : tuple of int. The shape of input data. diff --git a/python/tvm/relay/testing/resnet_3d.py b/python/tvm/relay/testing/resnet_3d.py index 484f51dcac9b..715e3951b856 100644 --- a/python/tvm/relay/testing/resnet_3d.py +++ b/python/tvm/relay/testing/resnet_3d.py @@ -174,7 +174,7 @@ def resnet( Channel size of each stage num_classes : int - Ouput size of symbol + Output size of symbol data_shape : tuple of int. The shape of input data. 
diff --git a/python/tvm/relay/transform/transform.py b/python/tvm/relay/transform/transform.py
index f02f8352de9e..0d078d39372d 100644
--- a/python/tvm/relay/transform/transform.py
+++ b/python/tvm/relay/transform/transform.py
@@ -985,7 +985,7 @@ def transform(func, mod, ctx):
     """
 
     if opt_level is None:
-        raise ValueError("Please provide opt_level for the funtion pass.")
+        raise ValueError("Please provide opt_level for the function pass.")
 
     required = required if required else []
     if not isinstance(required, (list, tuple)):
diff --git a/python/tvm/tir/stmt.py b/python/tvm/tir/stmt.py
index 9e1ef56cca58..5882dca5578e 100644
--- a/python/tvm/tir/stmt.py
+++ b/python/tvm/tir/stmt.py
@@ -109,7 +109,7 @@ class For(Stmt):
         The loop variable.
 
     min_val : PrimExpr
-        The begining value.
+        The beginning value.
 
     extent : PrimExpr
         The length of the loop.
diff --git a/python/tvm/tir/transform/function_pass.py b/python/tvm/tir/transform/function_pass.py
index 59b3ecd6237d..7cff1f66a625 100644
--- a/python/tvm/tir/transform/function_pass.py
+++ b/python/tvm/tir/transform/function_pass.py
@@ -130,7 +130,7 @@ def transform(func, mod, ctx):
     """
 
     if opt_level is None:
-        raise ValueError("Please provide opt_level for the funtion pass.")
+        raise ValueError("Please provide opt_level for the function pass.")
 
     required = required if required else []
     if not isinstance(required, (list, tuple)):
diff --git a/python/tvm/topi/arm_cpu/depthwise_conv2d.py b/python/tvm/topi/arm_cpu/depthwise_conv2d.py
index 441b0a5a3688..c21480724ae4 100644
--- a/python/tvm/topi/arm_cpu/depthwise_conv2d.py
+++ b/python/tvm/topi/arm_cpu/depthwise_conv2d.py
@@ -692,7 +692,7 @@ def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last):
     if kernel_vec.op.name == "kernel_vec":
         co, _, _, _, _ = s[kernel_vec].op.axis
         if autotvm.GLOBAL_SCOPE.in_tuning:
-            # kernel packing will be pre-computed during compliation, so we skip
+            # kernel packing will be pre-computed during compilation, so we skip
             # this part to make tuning records correct
             s[kernel_vec].pragma(co, "debug_skip_region")
         else:
diff --git a/python/tvm/topi/cuda/batch_matmul_tensorcore.py b/python/tvm/topi/cuda/batch_matmul_tensorcore.py
index 59b92ec9e623..962a8af7853b 100644
--- a/python/tvm/topi/cuda/batch_matmul_tensorcore.py
+++ b/python/tvm/topi/cuda/batch_matmul_tensorcore.py
@@ -291,7 +291,7 @@ def batch_matmul_tensorcore_cuda(x, y):
     x_shape = get_const_tuple(x.shape)
     y_shape = get_const_tuple(y.shape)
     assert x_shape[0] == y_shape[0], "batch dimension doesn't match"
-    assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistant"
+    assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
     batch, M, K = x.shape
     N = y.shape[1]
     out_dtype = x.dtype
diff --git a/python/tvm/topi/cuda/rcnn/proposal.py b/python/tvm/topi/cuda/rcnn/proposal.py
index e5e83b4911a3..12f7a23abe35 100644
--- a/python/tvm/topi/cuda/rcnn/proposal.py
+++ b/python/tvm/topi/cuda/rcnn/proposal.py
@@ -203,7 +203,7 @@ def argsort_ir(data_buf, out_index_buf):
 
 
 def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
-    """Non-maximum supression.
+    """Non-maximum suppression.
 
     Parameters
     ----------
diff --git a/python/tvm/topi/nn/batch_matmul.py b/python/tvm/topi/nn/batch_matmul.py
index 9c5848129397..b6ed5a373e81 100644
--- a/python/tvm/topi/nn/batch_matmul.py
+++ b/python/tvm/topi/nn/batch_matmul.py
@@ -62,7 +62,7 @@ def batch_matmul(x, y, oshape=None, auto_scheduler_rewritten_layout=""):
     k = te.reduce_axis((0, K), name="k")
     if oshape is None:
         assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
-        assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistant"
+        assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
         batch = te.max(XB, YB)
         N = y.shape[1]
         oshape = (batch, M, N)
diff --git a/python/tvm/topi/random/kernel.py b/python/tvm/topi/random/kernel.py
index b21db3778744..728cd682fa42 100644
--- a/python/tvm/topi/random/kernel.py
+++ b/python/tvm/topi/random/kernel.py
@@ -121,7 +121,7 @@ def _threefry(
         Threefry will write to :code:`out_buf[out_offset:out_offset+4*product(out_shape)]`
 
     out_shape: number
-        Determines the number of ouput states to generate. :code:`state[i]` will correspond to
+        Determines the number of output states to generate. :code:`state[i]` will correspond to
         counter+i.
     """
     nrounds = 20
diff --git a/python/tvm/topi/testing/strided_slice_python.py b/python/tvm/topi/testing/strided_slice_python.py
index c5eb72396c4f..30466c785778 100644
--- a/python/tvm/topi/testing/strided_slice_python.py
+++ b/python/tvm/topi/testing/strided_slice_python.py
@@ -26,7 +26,7 @@ def strided_slice_python(data, begin, end, strides, slice_mode="end"):
         Input data
 
     begin : list
-        Begining of the slices.
+        Beginning of the slices.
 
     end : list
         End of the slices.
@@ -81,7 +81,7 @@ def strided_set_python(data, v, begin, end, strides):
         Value data
 
     begin : list
-        Begining of the slices.
+        Beginning of the slices.
 
     end : list
         End of the slices.
diff --git a/python/tvm/topi/utils.py b/python/tvm/topi/utils.py
index cd9f0c61c854..2e8528c5e76c 100644
--- a/python/tvm/topi/utils.py
+++ b/python/tvm/topi/utils.py
@@ -460,7 +460,7 @@ def make_idx(b, e, s, z, i):
 
     Returns
     -------
-    postion: Expr
+    position: Expr
         int expression that corresponds to an array position in the selection.
     """
     bc = tvm.tir.Select(s < 0, i <= e, i < b)
diff --git a/python/tvm/topi/vision/rcnn/proposal.py b/python/tvm/topi/vision/rcnn/proposal.py
index e15ba8cd27c7..12a0d6bcf0a0 100644
--- a/python/tvm/topi/vision/rcnn/proposal.py
+++ b/python/tvm/topi/vision/rcnn/proposal.py
@@ -231,7 +231,7 @@ def argsort_ir(data_buf, out_index_buf):
 
 
 def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
-    """Non-maximum supression.
+    """Non-maximum suppression.
 
     Parameters
    ----------
diff --git a/python/tvm/topi/x86/batch_matmul.py b/python/tvm/topi/x86/batch_matmul.py
index 79b38de8cf93..df480123375d 100644
--- a/python/tvm/topi/x86/batch_matmul.py
+++ b/python/tvm/topi/x86/batch_matmul.py
@@ -49,7 +49,7 @@ def batch_matmul(cfg, x, y, out_shape=None):
     XB, M, XK = get_const_tuple(x.shape)
     YB, N, YK = get_const_tuple(y.shape)
     assert (XB == YB) or (YB == 1) or (XB == 1), "batch dimension doesn't match"
-    assert XK == YK, "shapes of x and y is inconsistant"
+    assert XK == YK, "shapes of x and y is inconsistent"
     B = te.max(XB, YB)
     K = XK
     if out_shape is not None:
@@ -151,7 +151,7 @@ def batch_matmul_blas_common(cfg, x, y, out_shape, lib):
         3-D with shape [batch, N, K]
     out_shape : tuple or None
         Shape of the output
-    lib : A contrib module which implements batch_matmul funtion
+    lib : A contrib module which implements batch_matmul function
         cblas and mkl are supported
 
     Returns
@@ -163,7 +163,7 @@ def batch_matmul_blas_common(cfg, x, y, out_shape, lib):
     XB, M, XK = get_const_tuple(x.shape)
     YB, N, YK = get_const_tuple(y.shape)
     assert XB == YB, "batch dimension doesn't match"
-    assert XK == YK, "shapes of x and y is inconsistant"
+    assert XK == YK, "shapes of x and y is inconsistent"
     if out_shape is not None:
         assert out_shape[0] == XB, "got invalid output shape"
         assert out_shape[1] == M, "got invalid output shape"
diff --git a/python/tvm/topi/x86/conv2d_avx_1x1.py b/python/tvm/topi/x86/conv2d_avx_1x1.py
index afee03a9f6a0..32b06725cdc2 100644
--- a/python/tvm/topi/x86/conv2d_avx_1x1.py
+++ b/python/tvm/topi/x86/conv2d_avx_1x1.py
@@ -191,7 +191,7 @@ def _declaration_conv_nhwc_pack(cfg, Input, Filter, stride, padding, dilation, o
     pad_before = [0, pad_top, pad_left, 0]
     pad_after = [0, pad_down, pad_right, 0]
     PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
-    # todo: padding filter to accomodate the intrinsic
+    # todo: padding filter to accommodate the intrinsic
     # packing the Filter to let memory access be consecutive for AVX512 intrinsic
     # Done in pre-compute stage
 
diff --git a/src/relay/ir/dataflow_matcher.cc b/src/relay/ir/dataflow_matcher.cc
index ac716579f2ab..43a6473fb632 100644
--- a/src/relay/ir/dataflow_matcher.cc
+++ b/src/relay/ir/dataflow_matcher.cc
@@ -734,7 +734,7 @@ class PatternGrouper {
         // Exit due to overlapping partitions
         return;
       } else if (kv.second != body) {
-        // if the node isn't the ouput of the group
+        // if the node isn't the output of the group
         auto node = matcher_->expr_graph_.node_map_.at(kv.first);
         for (auto* output : node->outputs_) {
           // and the node is used by nodes outside of the group
diff --git a/src/relay/ir/indexed_graph.cc b/src/relay/ir/indexed_graph.cc
index 0f81c2360d0f..36789e6f808a 100644
--- a/src/relay/ir/indexed_graph.cc
+++ b/src/relay/ir/indexed_graph.cc
@@ -73,7 +73,7 @@ IndexedGraph<Expr> CreateIndexedGraph(const Expr& expr) {
     return std::move(graph_);
   }
 
-  /*! Default visitation pushes the parent to the child's ouputs and the child to the parent's
+  /*! Default visitation pushes the parent to the child's outputs and the child to the parent's
    * inputs*/
   void VisitExpr(const Expr& expr, NodePtr parent) override {
     auto current = graph_.node_map_[expr];
@@ -220,7 +220,7 @@ IndexedGraph<DFPattern> CreateIndexedGraph(const DFPattern& pattern) {
     return std::move(graph_);
   }
 
-  /*! Default visitation pushes the parent to the child's ouputs */
+  /*! Default visitation pushes the parent to the child's outputs */
   void VisitDFPattern(const DFPattern& pattern, NodePtr parent) override {
     auto current = graph_.node_map_[pattern];
     if (parent) {
diff --git a/src/relay/transforms/partition_graph.cc b/src/relay/transforms/partition_graph.cc
index 7508d4437c18..404c7efb10b0 100644
--- a/src/relay/transforms/partition_graph.cc
+++ b/src/relay/transforms/partition_graph.cc
@@ -177,7 +177,7 @@ class Partitioner : public MixedModeMutator {
       AnnotatedRegion region = GetRegion(GetRef<Call>(call));
 
       // TODO(@manupa-arm) : need to use the parent function (to which region
-      // belongs to) name/key for the funtions that are created
+      // belongs to) name/key for the functions that are created
       BaseFunc f = GetFunc(GetRef<Call>(call));
 
       // Traverse subgraph inputs.
diff --git a/src/runtime/c_runtime_api.cc b/src/runtime/c_runtime_api.cc
index 6ecc60a93dec..b4457bf66614 100644
--- a/src/runtime/c_runtime_api.cc
+++ b/src/runtime/c_runtime_api.cc
@@ -169,7 +169,7 @@ void DeviceAPI::SyncStreamFromTo(TVMContext ctx, TVMStreamHandle event_src,
 // {message1}
 // {message2}
 // {Stack trace:}    // stack traces follow by this line
-//   {trace 0}       // two spaces in the begining.
+//   {trace 0}       // two spaces in the beginning.
 //   {trace 1}
 //   {trace 2}
 //--------------------------------------------------------
diff --git a/src/tir/transforms/hoist_if_then_else.cc b/src/tir/transforms/hoist_if_then_else.cc
index 7bae0ce8ca75..4a11a7e90e30 100644
--- a/src/tir/transforms/hoist_if_then_else.cc
+++ b/src/tir/transforms/hoist_if_then_else.cc
@@ -168,7 +168,7 @@ class HoistCandidateSelector final : public StmtExprVisitor {
     // To stop hoisting if any of the block variables are used.
     //
     // In case we want to use hoisting in between certain passes
-    // which have interdependencies of the postioning of if nodes with scope var
+    // which have interdependencies of the positioning of if nodes with scope var
     // it is better to disable this section
     if (support_block_scope_hosting_) {
       if (IsRecordingOn()) {
diff --git a/tests/python/frontend/mxnet/model_zoo/resnet.py b/tests/python/frontend/mxnet/model_zoo/resnet.py
index 98cdce6b4ea7..00e68958b462 100644
--- a/tests/python/frontend/mxnet/model_zoo/resnet.py
+++ b/tests/python/frontend/mxnet/model_zoo/resnet.py
@@ -182,7 +182,7 @@ def resnet(
     filter_list : list
         Channel size of each stage
     num_classes : int
-        Ouput size of symbol
+        Output size of symbol
     dataset : str
         Dataset type, only cifar10 and imagenet supports
     workspace : int
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index 59ecffe829df..d6fe98d031fa 100644
--- a/tests/python/frontend/onnx/test_forward.py
+++ b/tests/python/frontend/onnx/test_forward.py
@@ -3502,7 +3502,7 @@ def verify_roi_align(
 # @tvm.testing.uses_gpu
 def test_non_max_suppression():
     def verify_nms(
-        boxes, scores, max_ouput_boxes_per_class, iou_threshold, score_threshold, output_dims
+        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
     ):
         input_names = ["boxes", "scores", "max_output_boxes_per_class", "iou_threshold"]
         input_nodes = [