This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

If variable is not used within the loop body, start the name with an underscore #20505

Merged: 11 commits, Sep 5, 2021
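The rename applied throughout this PR follows a common Python convention: when a loop variable is never read inside the loop body, name it _ (or give it a leading underscore) so that readers and linters treat the value as intentionally discarded. A minimal before/after sketch of the pattern (illustrative only; the loop body below is a placeholder, not code from this PR):

# Before: `i` is assigned on every iteration but never read,
# which linters typically flag as an unused variable.
for i in range(nrepeat):
    run_trial()

# After: the underscore signals that the loop value is deliberately unused.
for _ in range(nrepeat):
    run_trial()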
python/mxnet/gluon/contrib/estimator/estimator.py (2 changes: 1 addition & 1 deletion)
@@ -392,7 +392,7 @@ def fit(self, train_data,
for handler in epoch_begin:
handler.epoch_begin(estimator_ref)

-for i, batch in enumerate(train_data):
+for _, batch in enumerate(train_data):
# batch begin
for handler in batch_begin:
handler.batch_begin(estimator_ref, batch=batch)
python/mxnet/operator.py (2 changes: 1 addition & 1 deletion)
@@ -824,7 +824,7 @@ def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
"entries in returned aux stypes, " \
"got %d."%(len(tensors[4]), len(ret[4]))
rstype = []
-for i, ret_list in enumerate(ret):
+for _, ret_list in enumerate(ret):
rstype.extend(ret_list)

for i, stype in enumerate(rstype):
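Several of the loops touched by this PR, like the one above, unpack enumerate but use only the element, not the index. As a general Python idiom (a reviewer-style observation, not a change made in this PR), dropping enumerate entirely is the simpler alternative when the index is unused:

rstype = []
for ret_list in ret:   # iterate directly; no index and no placeholder needed
    rstype.extend(ret_list)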
python/mxnet/test_utils.py (2 changes: 1 addition & 1 deletion)
@@ -1584,7 +1584,7 @@ def smaller_dtype(dt1, dt2):
else:
arg_params[n] = np.random.normal(size=arr.shape,
scale=scale).astype(rand_type)
-for n, arr in exe_list[0].aux_dict.items():
+for n, _ in exe_list[0].aux_dict.items():
if n not in aux_params:
aux_params[n] = 0
for exe in exe_list:
tests/nightly/dist_device_sync_kvstore_byteps.py (2 changes: 1 addition & 1 deletion)
@@ -65,7 +65,7 @@ def check_default_keys(nrepeat=3):
# init kv dns keys
kv.broadcast('3', mx.nd.ones(shape, ctx=get_current_context(device=True)), mx.nd.ones(shape, ctx=get_current_context(device=True)))
kv.broadcast('99', mx.nd.ones(big_shape, ctx=get_current_context(device=True)), mx.nd.ones(big_shape, ctx=get_current_context(device=True)))
-for i in range(nrepeat):
+for _ in range(nrepeat):
scale = my_rank + 1
num = (my_num_workers + 1) * my_num_workers / 2

tests/nightly/dist_device_sync_kvstore_custom.py (2 changes: 1 addition & 1 deletion)
@@ -54,7 +54,7 @@ def check_default_keys(nrepeat=3):
# init kv dns keys
kv.broadcast('3', mx.nd.ones(shape, ctx=mx.gpu()), mx.nd.ones(shape, ctx=mx.gpu()))
kv.broadcast('99', mx.nd.ones(big_shape, ctx=mx.gpu()), mx.nd.ones(big_shape, ctx=mx.gpu()))
-for i in range(nrepeat):
+for _ in range(nrepeat):
scale = my_rank + 1
num = (my_num_workers + 1) * my_num_workers * num_gpus / 2

tests/nightly/dist_sync_kvstore.py (6 changes: 3 additions & 3 deletions)
@@ -153,7 +153,7 @@ def check_row_sparse_keys_with_zeros(dtype, nrepeat):
v = mx.nd.sparse.zeros('row_sparse', shape, dtype=dtype)
big_v = mx.nd.sparse.zeros('row_sparse', big_shape, dtype=dtype)
# push
-for i in range(nrepeat):
+for _ in range(nrepeat):
kv.push(k1, v)
kv.push(k2, big_v)
# pull a subset of rows this worker is interested in
@@ -301,7 +301,7 @@ def check_compr_random(threshold, nrepeat):
kv.init(k, mx.nd.zeros(s))
for k,s in compr_random_keys_shapes:
curr_residual = np.zeros(s)
-for l in range(nrepeat):
+for _ in range(nrepeat):
orig_val = mx.nd.zeros(s)
kv.pull(k, orig_val)

@@ -399,7 +399,7 @@ def check_compr_random(threshold, nrepeat):
kv.init(k, mx.nd.zeros(s))
for k,s in compr_random_keys_shapes:
curr_residual = np.zeros(s)
-for l in range(nrepeat):
+for _ in range(nrepeat):
orig_val = mx.nd.zeros(s)
kv.pull(k, orig_val)

tests/nightly/test_kvstore.py (6 changes: 3 additions & 3 deletions)
@@ -150,7 +150,7 @@ def pull_init_test(kv):
assert_almost_equal(o.asnumpy(), exp)

def pull_before_push(kv):
-for i in range(nrepeat):
+for _ in range(nrepeat):
for j in range(len(keys)):
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
@@ -209,7 +209,7 @@ def verify_residual_1bit(kv, threshold, rate):
check_diff_to_scalar(o, curr_val)

def push_zeros(kv):
-for i in range(nrepeat):
+for _ in range(nrepeat):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)])
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
@@ -249,7 +249,7 @@ def verify_residual_2bit(kv, threshold, rate):
return curval

def check_neg(kv, neg, rate, curval):
-for r in range(nrepeat):
+for _ in range(nrepeat):
curval = curval + rate*nworker*neg
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*neg for g in range(nworker)])
tests/nightly/test_np_random.py (2 changes: 1 addition & 1 deletion)
@@ -130,7 +130,7 @@ def test_np_gamma():
# Generation test
trials = 8
num_buckets = 5
-for dtype in types:
+for _ in types:
for alpha, beta in [(2.0, 3.0), (0.5, 1.0)]:
buckets, probs = gen_buckets_probs_with_ppf(
lambda x: ss.gamma.ppf(x, a=alpha, loc=0, scale=beta), num_buckets)
tests/nightly/test_server_profiling.py (2 changes: 1 addition & 1 deletion)
@@ -35,7 +35,7 @@ def check_default_keys(kv, my_rank):
nrepeat = 10
# checks pull after push in loop, because behavior during
# consecutive pushes doesn't offer any guarantees
-for i in range(nrepeat):
+for _ in range(nrepeat):
kv.push(key, mx.nd.ones(shape, dtype='float32') * (my_rank+1))
val = mx.nd.zeros(shape, dtype='float32')
kv.pull(key, out=val)
tests/python/gpu/test_gluon_gpu.py (4 changes: 2 additions & 2 deletions)
@@ -108,7 +108,7 @@ def test_lstmp():
assert_almost_equal(layer_output, cell_output, rtol=rtol, atol=atol)
layer_output.backward()
cell_output.backward()
-for k, v in weights.items():
+for k, _ in weights.items():
layer_grad = layer_params['l0_' + k].grad()
cell_grad = cell_params[k].grad()
print('checking gradient for {}'.format('lstm0_l0_' + k))
@@ -414,7 +414,7 @@ def get_num_devices():
return
ndev = 2
# check with unsync version
-for i in range(10):
+for _ in range(10):
_check_batchnorm_result(mx.np.random.uniform(size=(4, 1, 4, 4)),
num_devices=ndev, cuda=True)

tests/python/gpu/test_gluon_model_zoo_gpu.py (2 changes: 1 addition & 1 deletion)
@@ -79,7 +79,7 @@ def test_inference(model_name):
gpu_param.set_data(cpu_param.data().as_in_context(mx.gpu()))

cpu_data = mx.np.array(data, ctx=mx.cpu())
-for i in range(5):
+for _ in range(5):
# Run inference.
with autograd.record(train_mode=False):
cpu_out = cpu_model(cpu_data)
tests/python/gpu/test_kvstore_gpu.py (2 changes: 1 addition & 1 deletion)
@@ -65,7 +65,7 @@ def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
-for i in range(count):
+for _ in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
tests/python/gpu/test_operator_gpu.py (10 changes: 5 additions & 5 deletions)
@@ -180,7 +180,7 @@ def check_fft(shape):
def test_fft():
nrepeat = 2
maxdim = 10
-for repeat in range(nrepeat):
+for _ in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@@ -339,7 +339,7 @@ def _flatten_list(nested_list):
rescale_grad=0.95, momentum=momentum, out=mx_p_w)

def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
-for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
+for _, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
@@ -1788,7 +1788,7 @@ def test_autograd_save_memory():
x.attach_grad()

with mx.autograd.record():
-for i in range(200):
+for _ in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@@ -1848,7 +1848,7 @@ def test_cross_device_autograd():

with mx.autograd.record():
y = x
-for i in range(3):
+for _ in range(3):
y = mx.nd.tanh(y)
y.backward()

@@ -2224,7 +2224,7 @@ def math_square(shape, dtype, check_value):

def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
-for i in range(run_num):
+for _ in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
tests/python/mkl/subgraphs/test_conv_subgraph.py (2 changes: 1 addition & 1 deletion)
@@ -254,7 +254,7 @@ class SingleConcat(nn.HybridBlock):
def __init__(self, input_num, dim, **kwargs):
super(SingleConcat, self).__init__(**kwargs)
self.concat = nn.HybridConcatenate(axis=dim)
-for i in range(input_num):
+for _ in range(input_num):
self.concat.add(nn.Identity())

def forward(self, x):
tests/python/mkl/test_mkldnn.py (2 changes: 1 addition & 1 deletion)
@@ -235,7 +235,7 @@ def test_mkldnn_sum_with_mkldnn_layout():
num_inputs = [2, 3, 4, 5]
for i in num_inputs:
inputs = []
-for n in range(i):
+for _ in range(i):
inputs.append(z)
y = mx.sym.add_n(*inputs) # (only MKLDNN data input)
exe = y._simple_bind(ctx=mx.cpu(), x=x_shape, w=w_shape)
tests/python/quantization/test_quantization.py (2 changes: 1 addition & 1 deletion)
@@ -1312,7 +1312,7 @@ def test_optimal_threshold_adversarial_case():
hist_edges = []
min_val = -2
max_val = 2
-for i in range(0, 998):
+for _ in range(0, 998):
hist.append(0)
for i in range(0, 999):
hist_edges.append((max_val - min_val) / 999 * i + min_val)
tests/python/unittest/test_autograd.py (4 changes: 2 additions & 2 deletions)
@@ -387,7 +387,7 @@ def backward(self, dY):
with mx.autograd.record():
X = mx.nd.zeros((3, 4))
#X.attach_grad() # uncommenting this line works
-for i in range(5):
+for _ in range(5):
f = Foo()
X = f(X)
X.wait_to_read()
@@ -446,7 +446,7 @@ def backward(self, dY):
with mx.autograd.record():
X = mx.np.zeros((3, 4))
#X.attach_grad() # uncommenting this line works
-for i in range(5):
+for _ in range(5):
f = Foo()
X = f(X)
X.wait_to_read()
tests/python/unittest/test_contrib_control_flow.py (4 changes: 2 additions & 2 deletions)
@@ -990,7 +990,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms,
e = out._bind(ctx=default_context(), args=arg_dict)
# the inputs to forward and backward are the same so forward and backward
# should always return the same outputs.
-for i in range(num_iters):
+for _ in range(num_iters):
e.forward(is_train=is_train)
if (is_train):
# backward
@@ -1000,7 +1000,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms,

# Below we use imperative to reimplement foreach and compute its gradients.
res = []
-for i in range(len(_as_list(out_grads[0]))):
+for _ in range(len(_as_list(out_grads[0]))):
res.append([])
for arr in _as_list(in_arrs):
arr.attach_grad()
tests/python/unittest/test_contrib_gluon_data_vision.py (4 changes: 2 additions & 2 deletions)
@@ -92,7 +92,7 @@ def test_imageiter(self):
rand_crop=1, rand_gray=0.1, rand_mirror=True)
]
for it in imageiter_list:
-for batch in it:
+for _ in it:
pass

def test_image_bbox_iter(self):
@@ -141,6 +141,6 @@ def test_bbox_augmenters(self):
pca_noise=0.1, hue=0.1, inter_method=10,
max_aspect_ratio=5, area_range=(0.1, 4.0),
max_attempts=50)
-for batch in det_iter:
+for _ in det_iter:
pass
mx.npx.waitall()
tests/python/unittest/test_contrib_io.py (2 changes: 1 addition & 1 deletion)
@@ -33,7 +33,7 @@ def test_mnist_batches(batch_size, expected, last_batch='discard'):
assert batch.label[0].shape == (batch_size,)
count = 0
test_iter.reset()
-for batch in test_iter:
+for _ in test_iter:
count += 1
assert count == expected, "expected {} batches, given {}".format(expected, count)

tests/python/unittest/test_dgl_graph.py (2 changes: 1 addition & 1 deletion)
@@ -227,7 +227,7 @@ def test_subgraph():
assert np.sum(remain == row_start) == len(remain)
break
row = subgs[0].indices[row_start:row_end]
-for j, subv2 in enumerate(row.asnumpy()):
+for _, subv2 in enumerate(row.asnumpy()):
v2 = vertices[subv2]
assert sp_g[v1, v2] == sp_subg[subv1, subv2]

tests/python/unittest/test_engine.py (2 changes: 1 addition & 1 deletion)
@@ -28,7 +28,7 @@ def test_bulk():
x.wait_to_read()
x += 1
assert (x.asnumpy() == 4).all()
-for i in range(100):
+for _ in range(100):
x += 1
assert (x.asnumpy() == 104).all()

tests/python/unittest/test_executor.py (6 changes: 3 additions & 3 deletions)
@@ -77,7 +77,7 @@ def test_bind():
'MXNET_EXEC_BULK_EXEC_TRAIN': enable_bulking}):
nrepeat = 10
maxdim = 4
-for repeat in range(nrepeat):
+for _ in range(nrepeat):
for dim in range(1, maxdim):
check_bind_with_uniform(lambda x, y: x + y,
lambda g, x, y: (g, g),
@@ -107,15 +107,15 @@ def test_bind():
def test_dot():
nrepeat = 10
maxdim = 4
-for repeat in range(nrepeat):
+for _ in range(nrepeat):
s =tuple(np.random.randint(1, 200, size=3))
check_bind_with_uniform(lambda x, y: np.dot(x, y),
lambda g, x, y: (np.dot(g, y.T), np.dot(x.T, g)),
2,
lshape=(s[0], s[1]),
rshape=(s[1], s[2]),
sf = mx.symbol.dot)
-for repeat in range(nrepeat):
+for _ in range(nrepeat):
s =tuple(np.random.randint(1, 200, size=1))
check_bind_with_uniform(lambda x, y: np.dot(x, y),
lambda g, x, y: (g * y, g * x),
tests/python/unittest/test_gluon.py (10 changes: 5 additions & 5 deletions)
@@ -692,7 +692,7 @@ def _syncParameters(bn1, bn2, ctx):
# check with unsync version
for shape in [(batch_size, 2), (batch_size, 3, 4), (batch_size, 4, 4, 4), (batch_size, 5, 6, 4, 4)]:
print(str((ndev, cuda, shape)))
-for i in range(10):
+for _ in range(10):
_check_batchnorm_result(mx.np.random.uniform(size=shape,
ctx=mx.cpu(0)),
num_devices=ndev, cuda=cuda)
@@ -1741,7 +1741,7 @@ def forward(self, in1):
for param in params:
t = TestIOForward()
t.hybridize(**param)
-for i in range(5):
+for _ in range(5):
d1.attach_grad()
out_grad = mx.np.random.uniform(size=(10))
res = t(d1)
@@ -1751,7 +1751,7 @@ def forward(self, in1):
for param in params:
t = TestIOBackward()
t.hybridize(**param)
-for i in range(5):
+for _ in range(5):
d1.attach_grad()
d2.attach_grad()
out_grad = mx.np.random.uniform(size=(10))
@@ -1946,13 +1946,13 @@ def __init__(self,
**kwargs):
super(Net, self).__init__(**kwargs)
self.concat = nn.HybridConcatenate(axis=check_dim)
-for i in range(input_num):
+for _ in range(input_num):
self.concat.add(gluon.nn.Conv2D(chn_num, (kernel, kernel)))

def forward(self, x):
return self.concat(x)

-for s in range(len(shape_list)):
+for _ in range(len(shape_list)):
shape = (batch_size,) + (3,) + shape_list[i]
x = mx.np.random.uniform(-1.0, 1.0, size=shape)
for i in range(len(chn_list)):