From 1022ad7c204127ca5581505f5888929c6116790f Mon Sep 17 00:00:00 2001
From: Siju
Date: Tue, 25 Sep 2018 08:39:48 +0530
Subject: [PATCH] [DOC]Errors corrected (#1767)

---
 include/tvm/ir_pass.h                  | 2 +-
 python/tvm/_ffi/runtime_ctypes.py      | 2 +-
 python/tvm/schedule.py                 | 2 +-
 python/tvm/tensor_intrin.py            | 2 +-
 src/codegen/codegen_c.cc               | 2 +-
 src/codegen/verilog/codegen_verilog.cc | 4 ++--
 src/op/tensorize.cc                    | 4 ++--
 src/runtime/pack_args.h                | 2 +-
 src/runtime/rpc/rpc_session.cc         | 4 ++--
 vta/python/vta/ir_pass.py              | 2 +-
 10 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/tvm/ir_pass.h b/include/tvm/ir_pass.h
index ab42cfc9625f..9403a2e6151b 100644
--- a/include/tvm/ir_pass.h
+++ b/include/tvm/ir_pass.h
@@ -217,7 +217,7 @@ Stmt NarrowChannelAccess(Stmt stmt);
  * \param auto_max_step The maximum step before stop attach automatic unroll
  * \param auto_max_depth The maximum depth before stop attach automatic unroll
  * \param auto_max_extent The maximum extent of the loop we can unroll,
- *  this is an legacy option that donot take the loop total steps into account.
+ *  this is an legacy option that do not take the loop total steps into account.
  * \param explicit_unroll Whether explicitly unroll the loop, or leave unroll annotation to codegen.
  * \return Transformed stmt.
  */
diff --git a/python/tvm/_ffi/runtime_ctypes.py b/python/tvm/_ffi/runtime_ctypes.py
index 4f94e0e62d0a..2aced1aef7d2 100644
--- a/python/tvm/_ffi/runtime_ctypes.py
+++ b/python/tvm/_ffi/runtime_ctypes.py
@@ -67,7 +67,7 @@ def __init__(self, type_str):
             bits = 64
             head = ""
         else:
-            raise ValueError("Donot know how to handle type %s" % type_str)
+            raise ValueError("Do not know how to handle type %s" % type_str)
         bits = int(head) if head else bits
         self.bits = bits
diff --git a/python/tvm/schedule.py b/python/tvm/schedule.py
index 594c2f2dc8bd..6c261a453457 100644
--- a/python/tvm/schedule.py
+++ b/python/tvm/schedule.py
@@ -362,7 +362,7 @@ def split(self, parent, factor=None, nparts=None):
         """
         if nparts is not None:
             if factor is not None:
-                raise ValueError("Donot need to provide both outer and nparts")
+                raise ValueError("Do not need to provide both outer and nparts")
             outer, inner = _api_internal._StageSplitByNParts(self, parent, nparts)
         else:
             if factor is None:
diff --git a/python/tvm/tensor_intrin.py b/python/tvm/tensor_intrin.py
index 62f8c8897d10..193124b2f946 100644
--- a/python/tvm/tensor_intrin.py
+++ b/python/tvm/tensor_intrin.py
@@ -72,7 +72,7 @@ def decl_tensor_intrin(op,
     binds_list = []
     for t in inputs:
         if not isinstance(t.op, _tensor.PlaceholderOp):
-            raise ValueError("Donot yet support composition op")
+            raise ValueError("Do not yet support composition op")

     cfg = current_build_config()
     for t in tensors:
diff --git a/src/codegen/codegen_c.cc b/src/codegen/codegen_c.cc
index c3b0d278c7ac..d902437dd990 100644
--- a/src/codegen/codegen_c.cc
+++ b/src/codegen/codegen_c.cc
@@ -207,7 +207,7 @@ std::string CodeGenC::GetStructRef(
     } else if (t.is_int()) {
       os << "v_int64";
     } else {
-      LOG(FATAL) << "donot know how to handle type" << t;
+      LOG(FATAL) << "Do not know how to handle type" << t;
     }
     os << ")";
     return os.str();
diff --git a/src/codegen/verilog/codegen_verilog.cc b/src/codegen/verilog/codegen_verilog.cc
index d7e149257fdb..af3d2fcfe467 100644
--- a/src/codegen/verilog/codegen_verilog.cc
+++ b/src/codegen/verilog/codegen_verilog.cc
@@ -213,11 +213,11 @@ VerilogValue CodeGenVerilog::VisitExpr_(const UIntImm *op) {
   return IntConst(op, this);
 }
 VerilogValue CodeGenVerilog::VisitExpr_(const FloatImm *op) {
-  LOG(FATAL) << "Donot support float constant in Verilog";
+  LOG(FATAL) << "Do not support float constant in Verilog";
   return VerilogValue();
 }
 VerilogValue CodeGenVerilog::VisitExpr_(const StringImm *op) {
-  LOG(FATAL) << "Donot support string constant in Verilog";
+  LOG(FATAL) << "Do not support string constant in Verilog";
   return VerilogValue();
 }
diff --git a/src/op/tensorize.cc b/src/op/tensorize.cc
index 6423c4e942e4..6daaedd16de1 100644
--- a/src/op/tensorize.cc
+++ b/src/op/tensorize.cc
@@ -52,10 +52,10 @@ size_t InferTensorizeRegion(
       const IterVarAttr& attr = (*iit).second;
      if (!found_point) {
        CHECK(!attr->bind_thread.defined())
-            << "Donot allow thread in tensorize scope";
+            << "Do not allow thread in tensorize scope";
      }
      if (attr->iter_type == kTensorized) {
-        CHECK(!found_point) << "Donot allow two tensorized point";
+        CHECK(!found_point) << "Do not allow two tensorized point";
        found_point = true;
        loc_scope = i - 1;
      }
diff --git a/src/runtime/pack_args.h b/src/runtime/pack_args.h
index 0a00e79f07df..5170e5fd9e9a 100644
--- a/src/runtime/pack_args.h
+++ b/src/runtime/pack_args.h
@@ -168,7 +168,7 @@ inline PackedFunc PackFuncNonBufferArg_(
       switch (codes[i]) {
        case INT64_TO_INT64:
        case FLOAT64_TO_FLOAT64: {
-          LOG(FATAL) << "Donot support 64bit argument to device function"; break;
+          LOG(FATAL) << "Do not support 64bit argument to device function"; break;
        }
        case INT64_TO_INT32: {
          holder[i].v_int32 = static_cast<int32_t>(args.values[base + i].v_int64);
diff --git a/src/runtime/rpc/rpc_session.cc b/src/runtime/rpc/rpc_session.cc
index 0e2d637ab475..208944a69dce 100644
--- a/src/runtime/rpc/rpc_session.cc
+++ b/src/runtime/rpc/rpc_session.cc
@@ -250,9 +250,9 @@ class RPCSession::EventHandler : public dmlc::Stream {
         this->Write(arr->dtype);
        this->WriteArray(arr->shape, arr->ndim);
        CHECK(arr->strides == nullptr)
-            << "Donot support strided remote array";
+            << "Do not support strided remote array";
        CHECK_EQ(arr->byte_offset, 0)
-            << "Donot support send byte offset";
+            << "Do not support send byte offset";
        break;
      }
      case kNull: break;
diff --git a/vta/python/vta/ir_pass.py b/vta/python/vta/ir_pass.py
index 90df67c53278..3efef7135edb 100644
--- a/vta/python/vta/ir_pass.py
+++ b/vta/python/vta/ir_pass.py
@@ -556,7 +556,7 @@ def _inject_copy(src, dst, pad_before, pad_after, pad_value):
             return irb.get()
         else:
-            raise RuntimeError("Donot support copy %s->%s" % (src.scope, dst.scope))
+            raise RuntimeError("Do not support copy %s->%s" % (src.scope, dst.scope))

     return tvm.ir_pass.InjectCopyIntrin(stmt_in, "dma_copy", _inject_copy)