diff --git a/include/tvm/relay/error.h b/include/tvm/relay/error.h index 6b9a1fa7b7c6..5189fd982d37 100644 --- a/include/tvm/relay/error.h +++ b/include/tvm/relay/error.h @@ -83,7 +83,7 @@ struct Error : public dmlc::Error { * * The final mode represents the old mode, if we report an error that has no span or * expression, we will default to throwing an exception with a textual representation - * of the error and no indication of where it occured in the original program. + * of the error and no indication of where it occurred in the original program. * * The latter mode is not ideal, and the goal of the new error reporting machinery is * to avoid ever reporting errors in this style. diff --git a/include/tvm/runtime/c_runtime_api.h b/include/tvm/runtime/c_runtime_api.h index fd1b877f6d4c..ba2c0d2291b6 100644 --- a/include/tvm/runtime/c_runtime_api.h +++ b/include/tvm/runtime/c_runtime_api.h @@ -187,7 +187,7 @@ TVM_DLL void TVMAPISetLastError(const char* msg); /*! * \brief return str message of the last error * all function in this file will return 0 when success - * and -1 when an error occured, + * and -1 when an error occurred, * TVMGetLastError can be called to retrieve the error * * this function is threadsafe and can be called by different thread diff --git a/nnvm/include/nnvm/c_api.h b/nnvm/include/nnvm/c_api.h index 75054e892d8e..773bc63b7dad 100644 --- a/nnvm/include/nnvm/c_api.h +++ b/nnvm/include/nnvm/c_api.h @@ -60,7 +60,7 @@ NNVM_DLL void NNAPISetLastError(const char* msg); /*! 
* \brief return str message of the last error * all function in this file will return 0 when success - * and -1 when an error occured, + * and -1 when an error occurred, * NNGetLastError can be called to retrieve the error * * this function is threadsafe and can be called by different thread diff --git a/nnvm/python/nnvm/frontend/common.py b/nnvm/python/nnvm/frontend/common.py index 610546d1973b..0e09a2c43323 100644 --- a/nnvm/python/nnvm/frontend/common.py +++ b/nnvm/python/nnvm/frontend/common.py @@ -58,7 +58,7 @@ def __call__(self, inputs, attrs, *args): class AttrConverter(object): - """Common attribute conveter. An AttrConverter instance is a callable: + """Common attribute converter. An AttrConverter instance is a callable: ``` attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)}) new_op_name, new_attr = attr_converter(attrs) @@ -72,12 +72,12 @@ class AttrConverter(object): `op_name = func(attr)` transforms : dict of `new_name, or (new_name, default_value, transform function)` If only a new_name is provided, it's like renaming the attribute name. - If default_value if provded, then the attribute is considered as optional. + If default_value is provided, then the attribute is considered as optional. If transform function is provided, the original attribute value is handled by transform function. excludes : list A list of excluded attributes that should `NOT` appear. - Raise NotImplementedError if occured. + Raise NotImplementedError if occurred. disables : list A list of attributes that is disabled in nnvm. Log warnings. ignores : list diff --git a/nnvm/python/nnvm/frontend/tensorflow.py b/nnvm/python/nnvm/frontend/tensorflow.py index ee78a7e523e8..08a52a9a079e 100644 --- a/nnvm/python/nnvm/frontend/tensorflow.py +++ b/nnvm/python/nnvm/frontend/tensorflow.py @@ -1177,7 +1177,7 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): -> All Const nodes are params. -> Last node is assumed as graph output. 
-> _output_shapes : Graph should be frozen with add_shapes=True. - Or user can pass input shape dictionaly optionally. + Or user can pass input shape dictionary optionally. -> DecodeJpeg, ResizeBilinear: These are dummy operators. Hence user should handle preprocessing outside. -> CheckNumerics: No implementation as of now for this. diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index 23477626b63b..efd198803c2b 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -286,7 +286,7 @@ def clear_padding(self): class AttrCvt(object): - """Common attribute conveter. An AttrConverter instance is a callable: + """Common attribute converter. An AttrConverter instance is a callable: ``` attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)}) new_op_name, new_attr = attr_converter(attrs) @@ -300,12 +300,12 @@ class AttrCvt(object): `op_name = func(attr)` transforms : dict of `new_name, or (new_name, default_value, transform function)` If only a new_name is provided, it's like renaming the attribute name. - If default_value if provded, then the attribute is considered as optional. + If default_value is provided, then the attribute is considered as optional. If transform function is provided, the original attribute value is handled by transform function. excludes : list A list of excluded attributes that should `NOT` appear. - Raise NotImplementedError if occured. + Raise NotImplementedError if occurred. disables : list A list of attributes that is disabled in relay. Log warnings. 
ignores : list diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index f709a63e79e8..45ae2cd19cd1 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -77,12 +77,12 @@ class AttrCvt(object): `op_name = func(attr)` transforms : dict of `new_name, or (new_name, default_value, transform function)` If only a new_name is provided, it's like renaming the attribute name. - If default_value if provded, then the attribute is considered as optional. + If default_value is provided, then the attribute is considered as optional. If transform function is provided, the original attribute value is handled by transform function. excludes : list A list of excluded attributes that should `NOT` appear. - Raise NotImplementedError if occured. + Raise NotImplementedError if occurred. disables : list A list of attributes that is disabled in relay. Log warnings. ignores : list @@ -1567,7 +1567,7 @@ def _in_while_loop(control_flow_node_map, op_name): Parameters ---------- control_flow_node_map : Dict[str, Set[str]] - A dictionay contains the unqiue control flow execution frame name to + A dictionary contains the unique control flow execution frame name to a set of primitive operators mapping. op_name : str @@ -1619,7 +1619,7 @@ def f2(): return tf.add(4, 23) r = tf.cond(tf.less(i, j), f1, f2) - This condition statement should be coverted into Relay in the following + This condition statement should be converted into Relay in the following form: .. code-block:: python @@ -1727,7 +1727,7 @@ def __init__(self): self._loop = None def _while_loop(self): - """An internal API to create a Relay recurisve call for a matched TF + """An internal API to create a Relay recursive call for a matched TF `while_loop` construct. """ wl = tvm.relay.var('while_loop') @@ -1796,7 +1796,7 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): -> All Const nodes are params. 
-> Last node is assumed as graph output. -> _output_shapes : Graph should be frozen with add_shapes=True. - Or user can pass input shape dictionaly optionally. + Or user can pass input shape dictionary optionally. -> DecodeJpeg, ResizeBilinear: These are dummy operators. Hence user should handle preprocessing outside. -> CheckNumerics: No implementation as of now for this. diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py index b4ebffb355d0..7bce9dd3c5b9 100644 --- a/python/tvm/relay/op/nn/nn.py +++ b/python/tvm/relay/op/nn/nn.py @@ -67,7 +67,7 @@ def conv2d(data, The weight expressions. strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. @@ -129,7 +129,7 @@ def conv2d_transpose(data, The weight expressions. strides : Tuple[int], optional - The strides of convoltution. + The strides of convolution. padding : Tuple[int], optional The padding of convolution on both sides of inputs. @@ -842,7 +842,7 @@ def contrib_conv2d_winograd_without_weight_transform(data, The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3) strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. @@ -908,7 +908,7 @@ def contrib_conv2d_winograd_nnpack_without_weight_transform(data, The weight expressions. strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. @@ -975,7 +975,7 @@ def contrib_conv2d_nchwc(data, The kernel expressions. strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. 
@@ -1040,7 +1040,7 @@ def contrib_depthwise_conv2d_nchwc(data, The kernel expressions. strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. @@ -1156,7 +1156,7 @@ def deformable_conv2d(data, The weight expressions. strides : tuple of int, optional - The strides of convoltution. + The strides of convolution. padding : tuple of int, optional The padding of convolution on both sides of inputs before convolution. diff --git a/src/common/socket.h b/src/common/socket.h index 58705f16bf73..91f9f4e5cf0a 100644 --- a/src/common/socket.h +++ b/src/common/socket.h @@ -373,7 +373,7 @@ class TCPSocket : public Socket { } /*! * \brief decide whether the socket is at OOB mark - * \return 1 if at mark, 0 if not, -1 if an error occured + * \return 1 if at mark, 0 if not, -1 if an error occurred */ int AtMark() const { #ifdef _WIN32 diff --git a/src/pass/arg_binder.h b/src/pass/arg_binder.h index 9de3a13270dc..f235ea49faac 100644 --- a/src/pass/arg_binder.h +++ b/src/pass/arg_binder.h @@ -50,7 +50,7 @@ namespace ir { * - assert bufferB.shape[1] == n + 3 * * In general, this is a constraint solving problem. We have simplified assumption - * over the binding declaration, such that we require the variable occured in + * over the binding declaration, such that we require the variable occurred in * constraint must be declared in argument list. So it is illegal to have signature * f(tA(shape=(n+3))) without any argument variable corresponds to n, even though * it is already enough to derive n from the input argument. 
diff --git a/topi/python/topi/cuda/reduction.py b/topi/python/topi/cuda/reduction.py index ff7232cc0fac..25885315179c 100644 --- a/topi/python/topi/cuda/reduction.py +++ b/topi/python/topi/cuda/reduction.py @@ -37,7 +37,7 @@ def _schedule_reduce(op, sch, is_idx_reduce=False): num_thread = 32 target = tvm.target.current_target() if target and target.target_name == "opencl": - # without it, CL_INVALID_WORK_GROUP_SIZE occured when running test_topi_reduce.py + # without it, CL_INVALID_WORK_GROUP_SIZE occurred when running test_topi_reduce.py # don't know why num_thread = 16 block_x = tvm.thread_axis("blockIdx.x")