[*.py] Rename "Arguments:" to "Args:" (#3203)
Co-authored-by: Vasilis Vryniotis <[email protected]>
SamuelMarks and datumbox authored Dec 22, 2020
1 parent ca6fdd6 commit 3d60f49
Showing 28 changed files with 73 additions and 73 deletions.
2 changes: 1 addition & 1 deletion references/detection/group_by_aspect_ratio.py
@@ -26,7 +26,7 @@ class GroupedBatchSampler(BatchSampler):
It enforces that the batch only contain elements from the same group.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
-Arguments:
+Args:
sampler (Sampler): Base sampler.
group_ids (list[int]): If the sampler produces indices in range [0, N),
`group_ids` must be a list of `N` ints which contains the group id of each sample.
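For reference, a minimal usage sketch of the sampler whose docstring changes above. This is illustrative only: GroupedBatchSampler lives in the detection reference scripts, not in the torchvision package, so the import assumes references/detection is on sys.path, and the dataset and group ids are made up.

    from torch.utils.data import RandomSampler
    from group_by_aspect_ratio import GroupedBatchSampler  # assumes references/detection on sys.path

    dataset = list(range(8))                      # stand-in dataset
    sampler = RandomSampler(dataset)
    group_ids = [0, 0, 1, 1, 0, 1, 0, 1]          # one group id per sample, e.g. an aspect-ratio bucket
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
    for batch in batch_sampler:
        print(batch)                              # each batch draws indices from a single group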
4 changes: 2 additions & 2 deletions torchvision/datasets/samplers/clip_sampler.py
@@ -111,7 +111,7 @@ class UniformClipSampler(Sampler):
When number of unique clips in the video is fewer than num_video_clips_per_video,
repeat the clips until `num_video_clips_per_video` clips are collected
-Arguments:
+Args:
video_clips (VideoClips): video clips to sample from
num_clips_per_video (int): number of clips to be sampled per video
"""
@@ -151,7 +151,7 @@ class RandomClipSampler(Sampler):
"""
Samples at most `max_video_clips_per_video` clips for each video randomly
-Arguments:
+Args:
video_clips (VideoClips): video clips to sample from
max_clips_per_video (int): maximum number of clips to be sampled per video
"""
6 changes: 3 additions & 3 deletions torchvision/datasets/video_utils.py
@@ -88,7 +88,7 @@ class VideoClips(object):
Recreating the clips for different clip lengths is fast, and can be done
with the `compute_clips` method.
-Arguments:
+Args:
video_paths (List[str]): paths to the video files
clip_length_in_frames (int): size of a clip in number of frames
frames_between_clips (int): step (in frames) between each clip
@@ -227,7 +227,7 @@ def compute_clips(self, num_frames, step, frame_rate=None):
Always returns clips of size `num_frames`, meaning that the
last few frames in a video can potentially be dropped.
-Arguments:
+Args:
num_frames (int): number of frames for the clip
step (int): distance between two clips
"""
@@ -285,7 +285,7 @@ def get_clip(self, idx):
"""
Gets a subclip from a list of videos.
-Arguments:
+Args:
idx (int): index of the subclip. Must be between 0 and num_clips().
Returns:
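A short sketch of the clip APIs touched in the two files above. The .mp4 paths are placeholders and decoding requires a working video backend, so treat this as an assumed-setup example rather than a runnable recipe out of the box.

    from torchvision.datasets.video_utils import VideoClips
    from torchvision.datasets.samplers import RandomClipSampler, UniformClipSampler

    video_paths = ["clips/a.mp4", "clips/b.mp4"]            # hypothetical files
    video_clips = VideoClips(video_paths, clip_length_in_frames=16, frames_between_clips=8)
    video_clips.compute_clips(num_frames=16, step=8)        # re-slice clips without re-reading metadata
    video, audio, info, video_idx = video_clips.get_clip(0)

    train_sampler = RandomClipSampler(video_clips, max_clips_per_video=5)  # at most 5 clips per video
    val_sampler = UniformClipSampler(video_clips, num_clips_per_video=3)   # exactly 3, uniformly spaced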
8 changes: 4 additions & 4 deletions torchvision/io/image.py
@@ -71,7 +71,7 @@ def read_file(path: str) -> torch.Tensor:
Reads and outputs the bytes contents of a file as a uint8 Tensor
with one dimension.
-Arguments:
+Args:
path (str): the path to the file to be read
Returns:
@@ -86,7 +86,7 @@ def write_file(filename: str, data: torch.Tensor) -> None:
Writes the contents of a uint8 tensor with one dimension to a
file.
-Arguments:
+Args:
filename (str): the path to the file to be written
data (Tensor): the contents to be written to the output file
"""
@@ -99,7 +99,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE
Optionally converts the image to the desired format.
The values of the output tensor are uint8 between 0 and 255.
-Arguments:
+Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the PNG image.
mode (ImageReadMode): the read mode used for optionally
@@ -162,7 +162,7 @@ def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANG
Optionally converts the image to the desired format.
The values of the output tensor are uint8 between 0 and 255.
-Arguments:
+Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the JPEG image.
mode (ImageReadMode): the read mode used for optionally
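A hedged sketch of the image I/O functions whose docstrings changed above; "dog.jpg" and "mask.png" are placeholder paths.

    from torchvision.io.image import read_file, decode_jpeg, decode_png, ImageReadMode

    raw = read_file("dog.jpg")                        # 1-D uint8 tensor holding the raw file bytes
    img = decode_jpeg(raw, mode=ImageReadMode.RGB)    # CHW uint8 tensor, values in [0, 255]

    raw_png = read_file("mask.png")
    mask = decode_png(raw_png, mode=ImageReadMode.GRAY)
    print(img.shape, img.dtype, mask.shape)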
2 changes: 1 addition & 1 deletion torchvision/models/_utils.py
@@ -18,7 +18,7 @@ class IntermediateLayerGetter(nn.ModuleDict):
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
-Arguments:
+Args:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
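As a sketch of how IntermediateLayerGetter is typically used (the return_layers keys must name direct children of the model, per the limitation quoted above; resnet18 and the layer choice are arbitrary):

    import torch
    import torchvision
    from torchvision.models._utils import IntermediateLayerGetter

    backbone = torchvision.models.resnet18(pretrained=False)
    getter = IntermediateLayerGetter(backbone, return_layers={"layer2": "feat2", "layer4": "feat4"})
    feats = getter(torch.rand(1, 3, 224, 224))        # OrderedDict: {"feat2": ..., "feat4": ...}
    print([(name, f.shape) for name, f in feats.items()])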
14 changes: 7 additions & 7 deletions torchvision/models/detection/_utils.py
@@ -15,7 +15,7 @@ class BalancedPositiveNegativeSampler(object):
def __init__(self, batch_size_per_image, positive_fraction):
# type: (int, float) -> None
"""
-Arguments:
+Args:
batch_size_per_image (int): number of elements to be selected per image
positive_fraction (float): percentage of positive elements per batch
"""
@@ -25,7 +25,7 @@ def __init__(self, batch_size_per_image, positive_fraction):
def __call__(self, matched_idxs):
# type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
"""
-Arguments:
+Args:
matched_idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
@@ -83,7 +83,7 @@ def encode_boxes(reference_boxes, proposals, weights):
Encode a set of proposals with respect to some
reference boxes
-Arguments:
+Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
@@ -133,7 +133,7 @@ class BoxCoder(object):
def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
# type: (Tuple[float, float, float, float], float) -> None
"""
-Arguments:
+Args:
weights (4-element tuple)
bbox_xform_clip (float)
"""
@@ -153,7 +153,7 @@ def encode_single(self, reference_boxes, proposals):
Encode a set of proposals with respect to some
reference boxes
-Arguments:
+Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
@@ -183,7 +183,7 @@ def decode_single(self, rel_codes, boxes):
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
-Arguments:
+Args:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
@@ -361,7 +361,7 @@ def overwrite_eps(model, eps):
only when the pretrained weights are loaded to maintain compatibility
with previous versions.
-Arguments:
+Args:
model (nn.Module): The model on which we perform the overwrite.
eps (float): The new value of eps.
"""
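A small sketch exercising two of the utilities documented above, BoxCoder and BalancedPositiveNegativeSampler, with made-up boxes and match indices:

    import torch
    from torchvision.models.detection._utils import BoxCoder, BalancedPositiveNegativeSampler

    coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
    proposals = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
    gt_boxes = torch.tensor([[1., 1., 11., 11.], [4., 4., 19., 21.]])
    codes = coder.encode_single(gt_boxes, proposals)     # relative offsets (dx, dy, dw, dh)
    decoded = coder.decode_single(codes, proposals)      # recovers boxes close to gt_boxes

    sampler = BalancedPositiveNegativeSampler(batch_size_per_image=4, positive_fraction=0.5)
    matched_idxs = [torch.tensor([1, 0, 0, 2, -1, 0])]   # -1 ignored, 0 negative, > 0 positive
    pos_masks, neg_masks = sampler(matched_idxs)         # binary masks selecting sampled elements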
2 changes: 1 addition & 1 deletion torchvision/models/detection/anchor_utils.py
@@ -22,7 +22,7 @@ class AnchorGenerator(nn.Module):
and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
per spatial location for feature map i.
-Arguments:
+Args:
sizes (Tuple[Tuple[int]]):
aspect_ratios (Tuple[Tuple[float]]):
"""
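A sketch of how AnchorGenerator is driven: it consumes an ImageList plus a list of feature maps, and the sizes, aspect ratios, and tensor shapes below are illustrative choices only.

    import torch
    from torchvision.models.detection.anchor_utils import AnchorGenerator
    from torchvision.models.detection.image_list import ImageList

    anchor_gen = AnchorGenerator(sizes=((32, 64, 128),),            # one tuple per feature map level
                                 aspect_ratios=((0.5, 1.0, 2.0),))
    images = ImageList(torch.rand(1, 3, 256, 256), [(256, 256)])
    features = [torch.rand(1, 8, 32, 32)]                           # a single feature map
    anchors = anchor_gen(images, features)                          # one (num_anchors, 4) tensor per image
    print(anchors[0].shape)                                         # 32 * 32 * 9 anchors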
4 changes: 2 additions & 2 deletions torchvision/models/detection/backbone_utils.py
@@ -14,7 +14,7 @@ class BackboneWithFPN(nn.Module):
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
-Arguments:
+Args:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
@@ -73,7 +73,7 @@ def resnet_fpn_backbone(
>>> ('3', torch.Size([1, 256, 2, 2])),
>>> ('pool', torch.Size([1, 256, 1, 1]))]
-Arguments:
+Args:
backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
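Beyond the feature-extraction example already shown in the docstring above, a common use (sketched here with arbitrary choices of 'resnet18' and num_classes=5) is to hand the FPN backbone to a detection model:

    from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
    from torchvision.models.detection.faster_rcnn import FasterRCNN

    backbone = resnet_fpn_backbone('resnet18', pretrained=False)   # exposes backbone.out_channels
    model = FasterRCNN(backbone, num_classes=5)                    # out_channels is used to size the heads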
8 changes: 4 additions & 4 deletions torchvision/models/detection/faster_rcnn.py
@@ -49,7 +49,7 @@ class FasterRCNN(GeneralizedRCNN):
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
-Arguments:
+Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
@@ -239,7 +239,7 @@ class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
-Arguments:
+Args:
in_channels (int): number of input channels
representation_size (int): size of the intermediate representation
"""
@@ -264,7 +264,7 @@ class FastRCNNPredictor(nn.Module):
Standard classification + bounding box regression layers
for Fast R-CNN.
-Arguments:
+Args:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
@@ -341,7 +341,7 @@ def fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
-Arguments:
+Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
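A hedged fine-tuning sketch for the model builder documented above: load fasterrcnn_resnet50_fpn and swap the box predictor for a custom class count (num_classes=3 is an example and includes the background class; pretrained flags are off to avoid downloads):

    import torch
    import torchvision
    from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False,
                                                                 pretrained_backbone=False)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes=3)

    model.eval()
    with torch.no_grad():
        predictions = model([torch.rand(3, 300, 400)])   # list of dicts with boxes, labels, scores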
4 changes: 2 additions & 2 deletions torchvision/models/detection/generalized_rcnn.py
@@ -14,7 +14,7 @@ class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN.
-Arguments:
+Args:
backbone (nn.Module):
rpn (nn.Module):
roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
@@ -43,7 +43,7 @@ def eager_outputs(self, losses, detections):
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
-Arguments:
+Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
2 changes: 1 addition & 1 deletion torchvision/models/detection/image_list.py
@@ -14,7 +14,7 @@ class ImageList(object):

def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]):
"""
-Arguments:
+Args:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
4 changes: 2 additions & 2 deletions torchvision/models/detection/keypoint_rcnn.py
@@ -44,7 +44,7 @@ class KeypointRCNN(FasterRCNN):
- scores (Tensor[N]): the scores for each prediction
- keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
-Arguments:
+Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
@@ -309,7 +309,7 @@ def keypointrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)
-Arguments:
+Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
6 changes: 3 additions & 3 deletions torchvision/models/detection/mask_rcnn.py
@@ -48,7 +48,7 @@ class MaskRCNN(FasterRCNN):
obtain the final segmentation masks, the soft masks can be thresholded, generally
with a value of 0.5 (mask >= 0.5)
-Arguments:
+Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
@@ -222,7 +222,7 @@ def __init__(self, backbone, num_classes=None,
class MaskRCNNHeads(nn.Sequential):
def __init__(self, in_channels, layers, dilation):
"""
-Arguments:
+Args:
in_channels (int): number of input channels
layers (list): feature dimensions of each FCN layer
dilation (int): dilation rate of kernel
@@ -308,7 +308,7 @@ def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)
-Arguments:
+Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
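Similarly, a sketched head swap for Mask R-CNN; num_classes=2 and the 256-channel hidden layer are example values, and pretrained flags are off to avoid downloads:

    import torchvision
    from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
    from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

    num_classes = 2   # background + one object class
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False,
                                                               pretrained_backbone=False)

    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    in_channels_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_channels_mask, 256, num_classes)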
12 changes: 6 additions & 6 deletions torchvision/models/detection/retinanet.py
@@ -34,7 +34,7 @@ class RetinaNetHead(nn.Module):
"""
A regression and classification head for use in RetinaNet.
-Arguments:
+Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
@@ -64,7 +64,7 @@ class RetinaNetClassificationHead(nn.Module):
"""
A classification head for use in RetinaNet.
-Arguments:
+Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
@@ -149,7 +149,7 @@ class RetinaNetRegressionHead(nn.Module):
"""
A regression head for use in RetinaNet.
-Arguments:
+Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
@@ -251,7 +251,7 @@ class RetinaNet(nn.Module):
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
-Arguments:
+Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
@@ -457,7 +457,7 @@ def postprocess_detections(self, head_outputs, anchors, image_shapes):
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
-Arguments:
+Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
@@ -597,7 +597,7 @@ def retinanet_resnet50_fpn(pretrained=False, progress=True,
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
-Arguments:
+Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
"""
8 changes: 4 additions & 4 deletions torchvision/models/detection/roi_heads.py
@@ -18,7 +18,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
"""
Computes the loss for Faster R-CNN.
-Arguments:
+Args:
class_logits (Tensor)
box_regression (Tensor)
labels (list[BoxList])
@@ -61,7 +61,7 @@ def maskrcnn_inference(x, labels):
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
-Arguments:
+Args:
x (Tensor): the mask logits
labels (list[BoxList]): bounding boxes that are used as
reference, one for each image
@@ -101,7 +101,7 @@ def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
# type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
"""
-Arguments:
+Args:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
@@ -727,7 +727,7 @@ def forward(self,
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
-Arguments:
+Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])