diff --git a/torchvision/datasets/mnist.py b/torchvision/datasets/mnist.py
index 7c206f55edb..9a0ffa86160 100644
--- a/torchvision/datasets/mnist.py
+++ b/torchvision/datasets/mnist.py
@@ -471,8 +471,8 @@ def read_sn3_pascalvincent_tensor(path: Union[str, IO], strict: bool = True) ->
     magic = get_int(data[0:4])
     nd = magic % 256
     ty = magic // 256
-    assert nd >= 1 and nd <= 3
-    assert ty >= 8 and ty <= 14
+    assert 1 <= nd <= 3
+    assert 8 <= ty <= 14
     m = SN3_PASCALVINCENT_TYPEMAP[ty]
     s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
     parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py
index 0a56afc8382..ce427e04883 100644
--- a/torchvision/datasets/phototour.py
+++ b/torchvision/datasets/phototour.py
@@ -192,7 +192,6 @@ def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
     """Return a Tensor containing the list of labels
        Read the file and keep only the ID of the 3D point.
     """
-    labels = []
     with open(os.path.join(data_dir, info_file), 'r') as f:
         labels = [int(line.split()[0]) for line in f]
     return torch.LongTensor(labels)
diff --git a/torchvision/io/_video_opt.py b/torchvision/io/_video_opt.py
index dbb31574fc1..5fbf4a1d921 100644
--- a/torchvision/io/_video_opt.py
+++ b/torchvision/io/_video_opt.py
@@ -303,7 +303,7 @@ def _read_video_timestamps_from_file(filename):
         1,  # audio_timebase_den
     )
     _vframes, vframe_pts, vtimebase, vfps, vduration, \
-        _aframes, aframe_pts, atimebase, asample_rate, aduration = (result)
+        _aframes, aframe_pts, atimebase, asample_rate, aduration = result
     info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
 
     vframe_pts = vframe_pts.numpy().tolist()
diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py
index dce7f038370..e9a4a7104cf 100644
--- a/torchvision/models/detection/backbone_utils.py
+++ b/torchvision/models/detection/backbone_utils.py
@@ -87,7 +87,7 @@ def resnet_fpn_backbone(
         norm_layer=norm_layer)
 
     # select layers that wont be frozen
-    assert trainable_layers <= 5 and trainable_layers >= 0
+    assert 0 <= trainable_layers <= 5
     layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
     # freeze layers only if pretrained backbone is used
     for name, parameter in backbone.named_parameters():
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 1d7161ac5db..c321e79f298 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -106,6 +106,6 @@ def forward(self, images, targets=None):
             if not self._has_warned:
                 warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
                 self._has_warned = True
-            return (losses, detections)
+            return losses, detections
         else:
             return self.eager_outputs(losses, detections)
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index be067ea2433..5f476f63827 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -190,7 +190,7 @@ def _onnx_heatmaps_to_keypoints(maps, maps_i, roi_map_width, roi_map_height,
     xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32)
     xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32)
-    xy_preds_i_2 = torch.ones((xy_preds_i_1.shape), dtype=torch.float32)
+    xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32)
     xy_preds_i = torch.stack([xy_preds_i_0.to(dtype=torch.float32),
                               xy_preds_i_1.to(dtype=torch.float32),
                               xy_preds_i_2.to(dtype=torch.float32)], 0)
 
@@ -795,7 +795,6 @@ def forward(self,
                 mask_features = self.mask_head(mask_features)
                 mask_logits = self.mask_predictor(mask_features)
             else:
-                mask_logits = torch.tensor(0)
                 raise Exception("Expected mask_roi_pool to be not None")
 
             loss_mask = {}
diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py
index fbdaf3d555a..e60d27c18b2 100644
--- a/torchvision/models/video/resnet.py
+++ b/torchvision/models/video/resnet.py
@@ -30,7 +30,7 @@ def __init__(self,
 
     @staticmethod
     def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride
 
 
 class Conv2Plus1D(nn.Sequential):
@@ -53,7 +53,7 @@ def __init__(self,
 
     @staticmethod
    def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride
 
 
 class Conv3DNoTemporal(nn.Conv3d):
@@ -75,7 +75,7 @@ def __init__(self,
 
     @staticmethod
     def get_downsample_stride(stride):
-        return (1, stride, stride)
+        return 1, stride, stride
 
 
 class BasicBlock(nn.Module):
diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py
index 74ed7b9b69c..7d72769ab07 100644
--- a/torchvision/ops/feature_pyramid_network.py
+++ b/torchvision/ops/feature_pyramid_network.py
@@ -99,9 +99,7 @@ def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:
         This is equivalent to self.inner_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.inner_blocks:
-            num_blocks += 1
+        num_blocks = len(self.inner_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
@@ -117,9 +115,7 @@ def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:
         This is equivalent to self.layer_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.layer_blocks:
-            num_blocks += 1
+        num_blocks = len(self.layer_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index c97fcf44fe7..21458b3bdab 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -45,7 +45,7 @@ def _max_value(dtype: torch.dtype) -> float:
             max_value = next_value
             bits *= 2
         else:
-            return max_value.item()
+            break
     return max_value.item()