From 52b605dd5b400b352cea445be363d1e7402c3375 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Thu, 28 Oct 2021 18:35:01 +0200
Subject: [PATCH] Add pre-commit CI actions (#4982)
* define pre-commit
* add CI code
* configure
* apply pre-commit
* fstring
* apply MD
* pre-commit
* Update torch_utils.py
* Update print strings
* notes
* Cleanup code-format.yml
* Update setup.cfg
* Update .pre-commit-config.yaml
Co-authored-by: Glenn Jocher
---
.github/ISSUE_TEMPLATE/feature-request.md | 2 +-
.github/workflows/ci-testing.yml | 2 +-
.github/workflows/code-format.yml | 47 ++++++++++++++++
.github/workflows/codeql-analysis.yml | 2 +-
.github/workflows/greetings.yml | 1 -
.gitignore | 1 +
.pre-commit-config.yaml | 67 +++++++++++++++++++++++
LICENSE | 2 +-
README.md | 10 ++--
data/Objects365.yaml | 12 ++--
data/coco128.yaml | 2 +-
data/hyps/hyp.scratch-high.yaml | 2 +-
data/hyps/hyp.scratch-low.yaml | 2 +-
models/common.py | 10 ++--
models/experimental.py | 1 -
models/hub/yolov5-bifpn.yaml | 2 +-
models/tf.py | 40 +++++++-------
models/yolo.py | 10 ++--
setup.cfg | 45 +++++++++++++++
tutorial.ipynb | 2 +-
utils/datasets.py | 24 ++++----
utils/general.py | 4 +-
utils/google_app_engine/app.yaml | 2 +-
utils/loggers/__init__.py | 2 +-
utils/loggers/wandb/README.md | 32 +++++------
utils/loggers/wandb/sweep.yaml | 10 ++--
utils/loggers/wandb/wandb_utils.py | 30 +++++-----
utils/loss.py | 6 +-
utils/plots.py | 8 +--
utils/torch_utils.py | 6 +-
30 files changed, 273 insertions(+), 113 deletions(-)
create mode 100644 .github/workflows/code-format.yml
create mode 100644 .pre-commit-config.yaml
create mode 100644 setup.cfg
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
index 1fdf99045488..994f506e0f09 100644
--- a/.github/ISSUE_TEMPLATE/feature-request.md
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -13,7 +13,7 @@ assignees: ''
## Motivation
-
## Pitch
diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 6d1603880f4d..8ebfdeca8d74 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -83,7 +83,7 @@ jobs:
        # Python
        python - <<EOF
diff --git a/.github/workflows/code-format.yml b/.github/workflows/code-format.yml
new file mode 100644
--- /dev/null
+++ b/.github/workflows/code-format.yml
@@ -0,0 +1,47 @@
+# Run code formatting GitHub Action, can be replaced by this bot: https://github.com/marketplace/pre-commit-ci
+
+name: Code formatting
+
+on:
+  push:
+    branches: [master]
+  pull_request: {}
+
+jobs:
+  pre-commit-check:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+
+      - name: set PY
+        run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
+      - uses: actions/cache@v2
+        with:
+          path: ~/.cache/pre-commit
+          key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
+
+      - uses: pre-commit/action@v2.0.3
+      # this action also provides an additional behaviour when used in private repositories
+      # when configured with a github token, the action will push back fixes to the pull request branch
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
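The cache key above pins pre-commit's hook environments to a PY hash of the interpreter. A minimal sketch of an equivalent key derivation in Python, assuming the scheme suggested in the pre-commit docs (hashing the interpreter version and path):

import hashlib
import sys

# Hash the interpreter version and path so cached hook environments are
# only reused with the same Python that created them.
py_key = hashlib.sha256(sys.version.encode() + sys.executable.encode()).hexdigest()
print(f"PY={py_key}")  # the workflow step exports this value to $GITHUB_ENV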
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 2305ea07e902..67f51f0e8bce 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -1,4 +1,4 @@
-# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
+# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
# https://github.com/github/codeql-action
name: "CodeQL"
diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index a00ee8da66e1..0daf9514d3c5 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -57,4 +57,3 @@ jobs:
If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
-
diff --git a/.gitignore b/.gitignore
index 375b71807588..5f8cab550021 100755
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@
*.data
*.json
*.cfg
+!setup.cfg
!cfg/yolov3*.cfg
storage.googleapis.com
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 000000000000..2eb78aa17ef4
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,67 @@
+# Define hooks for code formatting
+# Applied to staged files on each commit once a user has installed and linked the commit hook
+
+default_language_version:
+ python: python3.8
+
+# Define bot property if installed via https://github.com/marketplace/pre-commit-ci
+ci:
+ autofix_prs: true
+ autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+ autoupdate_schedule: quarterly
+ # submodules: true
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - id: check-case-conflict
+ - id: check-yaml
+ - id: check-toml
+ - id: pretty-format-json
+ - id: check-docstring-first
+
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v2.23.1
+ hooks:
+ - id: pyupgrade
+ args: [--py36-plus]
+ name: Upgrade code
+
+ # TODO
+ #- repo: https://github.com/PyCQA/isort
+ # rev: 5.9.3
+ # hooks:
+ # - id: isort
+ # name: imports
+
+ # TODO
+ #- repo: https://github.com/pre-commit/mirrors-yapf
+ # rev: v0.31.0
+ # hooks:
+ # - id: yapf
+ # name: formatting
+
+ # TODO
+ #- repo: https://github.com/executablebooks/mdformat
+ # rev: 0.7.7
+ # hooks:
+ # - id: mdformat
+ # additional_dependencies:
+ # - mdformat-gfm
+ # - mdformat-black
+ # - mdformat_frontmatter
+
+ # TODO
+ #- repo: https://github.com/asottile/yesqa
+ # rev: v1.2.3
+ # hooks:
+ # - id: yesqa
+
+ - repo: https://github.com/PyCQA/flake8
+ rev: 3.9.2
+ hooks:
+ - id: flake8
+ name: PEP8
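The pyupgrade hook (--py36-plus) configured above accounts for many of the mechanical rewrites in the Python files later in this patch. A hedged before/after sketch with illustrative names:

class Base:
    pass

class Model(Base):
    def __init__(self):
        super().__init__()  # was: super(Model, self).__init__()

name, conf = 'person', 0.876
msg = f'{name} {conf:.3f}'  # was: '%s %.3f' % (name, conf)
print(msg)  # -> person 0.876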
diff --git a/LICENSE b/LICENSE
index 9e419e042146..92b370f0e0e1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
-.
\ No newline at end of file
+.
diff --git a/README.md b/README.md
index 0d474cb4a09b..d3fd7e9a92f5 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained
open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-
@@ -109,7 +109,7 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and
```bash
$ python detect.py --source 0 # webcam
- file.jpg # image
+ file.jpg # image
file.mp4 # video
path/ # directory
path/*.jpg # glob
@@ -136,7 +136,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
-
+
Tutorials
@@ -178,7 +178,7 @@ Get started in seconds with our verified environments. Click each icon below for
-
+
## Integrations
@@ -239,7 +239,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6
|[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
Table Notes (click to expand)
diff --git a/data/Objects365.yaml b/data/Objects365.yaml
index 97a424fd03a0..b10c28e764c1 100644
--- a/data/Objects365.yaml
+++ b/data/Objects365.yaml
@@ -62,21 +62,21 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla
download: |
from pycocotools.coco import COCO
from tqdm import tqdm
-
+
from utils.general import Path, download, np, xyxy2xywhn
-
+
# Make Directories
dir = Path(yaml['path']) # dataset root dir
for p in 'images', 'labels':
(dir / p).mkdir(parents=True, exist_ok=True)
for q in 'train', 'val':
(dir / p / q).mkdir(parents=True, exist_ok=True)
-
+
# Train, Val Splits
for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
print(f"Processing {split} in {patches} patches ...")
images, labels = dir / 'images' / split, dir / 'labels' / split
-
+
# Download
url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
if split == 'train':
@@ -86,11 +86,11 @@ download: |
download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
-
+
# Move
for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
f.rename(images / f.name) # move to /images/{split}
-
+
# Labels
coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
diff --git a/data/coco128.yaml b/data/coco128.yaml
index 70cf52c397af..b1dfb004afa1 100644
--- a/data/coco128.yaml
+++ b/data/coco128.yaml
@@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't
# Download script/URL (optional)
-download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
\ No newline at end of file
+download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml
index 519c82687e09..5a586cc63fae 100644
--- a/data/hyps/hyp.scratch-high.yaml
+++ b/data/hyps/hyp.scratch-high.yaml
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.1 # image mixup (probability)
-copy_paste: 0.1 # segment copy-paste (probability)
\ No newline at end of file
+copy_paste: 0.1 # segment copy-paste (probability)
diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml
index b093a95ac53b..b9ef1d55a3b6 100644
--- a/data/hyps/hyp.scratch-low.yaml
+++ b/data/hyps/hyp.scratch-low.yaml
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)
-copy_paste: 0.0 # segment copy-paste (probability)
\ No newline at end of file
+copy_paste: 0.0 # segment copy-paste (probability)
diff --git a/models/common.py b/models/common.py
index 5da35690a4ec..d0fb0e8596ed 100644
--- a/models/common.py
+++ b/models/common.py
@@ -79,7 +79,7 @@ def __init__(self, c1, c2, num_heads, num_layers):
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
- self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
+ self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x):
@@ -114,7 +114,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
@@ -130,7 +130,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
@@ -158,7 +158,7 @@ class C3Ghost(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
+ self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
class SPP(nn.Module):
@@ -362,7 +362,7 @@ class Detections:
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
- gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
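The recurring nn.Sequential change above swaps a list comprehension for a generator expression; both unpack identically, the generator just skips the intermediate list. A minimal sketch, assuming torch is installed:

import torch.nn as nn

n = 3
old = nn.Sequential(*[nn.ReLU() for _ in range(n)])  # materializes a list first
new = nn.Sequential(*(nn.ReLU() for _ in range(n)))  # unpacks the generator directly
assert len(old) == len(new) == n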
diff --git a/models/experimental.py b/models/experimental.py
index edccc9632fb5..adb86c81fc06 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -97,7 +97,6 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True):
else:
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse
-
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml
index 119aebb1523a..2f2c82c70122 100644
--- a/models/hub/yolov5-bifpn.yaml
+++ b/models/hub/yolov5-bifpn.yaml
@@ -18,7 +18,7 @@ backbone:
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]]
+ [-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
diff --git a/models/tf.py b/models/tf.py
index 1c6da43adaac..5599ff5cce91 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -40,7 +40,7 @@
class TFBN(keras.layers.Layer):
# TensorFlow BatchNormalization wrapper
def __init__(self, w=None):
- super(TFBN, self).__init__()
+ super().__init__()
self.bn = keras.layers.BatchNormalization(
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
@@ -54,7 +54,7 @@ def call(self, inputs):
class TFPad(keras.layers.Layer):
def __init__(self, pad):
- super(TFPad, self).__init__()
+ super().__init__()
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
def call(self, inputs):
@@ -65,7 +65,7 @@ class TFConv(keras.layers.Layer):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, weights, kernel, stride, padding, groups
- super(TFConv, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
assert isinstance(k, int), "Convolution with multiple kernels are not allowed."
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
@@ -96,7 +96,7 @@ class TFFocus(keras.layers.Layer):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, kernel, stride, padding, groups
- super(TFFocus, self).__init__()
+ super().__init__()
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
@@ -110,7 +110,7 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
class TFBottleneck(keras.layers.Layer):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
- super(TFBottleneck, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
@@ -123,7 +123,7 @@ def call(self, inputs):
class TFConv2d(keras.layers.Layer):
# Substitution for PyTorch nn.Conv2D
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
- super(TFConv2d, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
self.conv = keras.layers.Conv2D(
c2, k, s, 'VALID', use_bias=bias,
@@ -138,7 +138,7 @@ class TFBottleneckCSP(keras.layers.Layer):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFBottleneckCSP, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
@@ -158,7 +158,7 @@ class TFC3(keras.layers.Layer):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFC3, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
@@ -172,7 +172,7 @@ def call(self, inputs):
class TFSPP(keras.layers.Layer):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
- super(TFSPP, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
@@ -186,7 +186,7 @@ def call(self, inputs):
class TFSPPF(keras.layers.Layer):
# Spatial pyramid pooling-Fast layer
def __init__(self, c1, c2, k=5, w=None):
- super(TFSPPF, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
@@ -201,7 +201,7 @@ def call(self, inputs):
class TFDetect(keras.layers.Layer):
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
- super(TFDetect, self).__init__()
+ super().__init__()
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
@@ -249,7 +249,7 @@ def _make_grid(nx=20, ny=20):
class TFUpsample(keras.layers.Layer):
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
- super(TFUpsample, self).__init__()
+ super().__init__()
assert scale_factor == 2, "scale_factor must be 2"
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
@@ -263,7 +263,7 @@ def call(self, inputs):
class TFConcat(keras.layers.Layer):
def __init__(self, dimension=1, w=None):
- super(TFConcat, self).__init__()
+ super().__init__()
assert dimension == 1, "convert only NCHW to NHWC concat"
self.d = 3
@@ -272,7 +272,7 @@ def call(self, inputs):
def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
- LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -299,7 +299,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
+ c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -312,11 +312,11 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
else tf_m(*args, w=model.model[i]) # module
- torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in torch_m_.parameters()]) # number params
+ np = sum(x.numel() for x in torch_m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
+ LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
ch.append(c2)
@@ -325,7 +325,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
class TFModel:
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
- super(TFModel, self).__init__()
+ super().__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
@@ -336,7 +336,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64
# Define model
if nc and nc != self.yaml['nc']:
- print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc))
+ print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
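The parse_model logging rewrite relies on f-string alignment specs: ':>N' right-aligns and ':<N' left-aligns a field to width N, matching the old %-format widths for these integer and string values. A small sketch with made-up values:

i, f, n, np_, t = 0, -1, 1, 3520, 'models.common.Conv'
args = [3, 32, 6, 2, 2]
print('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np_, t, args))
print(f'{i:>3}{str(f):>18}{str(n):>3}{np_:>10} {t:<40}{str(args):<30}')
# both lines render the same table row for integer parameter counts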
diff --git a/models/yolo.py b/models/yolo.py
index 497a0e9c24e6..0fa2db91e82b 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -247,7 +247,7 @@ def _apply(self, fn):
def parse_model(d, ch): # model_dict, input_channels(3)
- LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -275,7 +275,7 @@ def parse_model(d, ch): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[x] for x in f])
+ c2 = sum(ch[x] for x in f)
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -287,11 +287,11 @@ def parse_model(d, ch): # model_dict, input_channels(3)
else:
c2 = ch[f]
- m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in m_.parameters()]) # number params
+ np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print
+ LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
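Likewise, sum() accepts any iterable, so dropping the brackets in np = sum(x.numel() for x in m_.parameters()) returns the same total without building a list. A one-line check, assuming torch:

import torch.nn as nn

m = nn.Linear(4, 2)  # 4*2 weights + 2 biases = 10 parameters
assert sum([x.numel() for x in m.parameters()]) == sum(x.numel() for x in m.parameters()) == 10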
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000000..7d25200cdb33
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,45 @@
+# Project-wide configuration file, can be used for package metadata and other tool configurations
+# Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
+
+[metadata]
+license_file = LICENSE
+description-file = README.md
+
+
+[tool:pytest]
+norecursedirs =
+ .git
+ dist
+ build
+addopts =
+ --doctest-modules
+ --durations=25
+ --color=yes
+
+
+[flake8]
+max-line-length = 120
+exclude = .tox,*.egg,build,temp
+select = E,W,F
+doctests = True
+verbose = 2
+# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
+format = pylint
+# see: https://www.flake8rules.com/
+ignore =
+ E731 # Do not assign a lambda expression, use a def
+ F405
+ E402
+ F841
+ E741
+ F821
+ E722
+ F401
+ W504
+ E127
+ E231
+ E501
+ F403
+ E302
+ F541
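For reference, a hypothetical snippet that trips several of the suppressed codes; with the ignore list above in place, flake8 accepts it cleanly:

import os                 # F401: 'os' imported but unused
from math import *        # F403: star import used

square = lambda x: x * x  # E731: lambda assigned to a name

try:
    print(square(pi))     # F405: 'pi' may be undefined, or defined from star imports
except:                   # E722: bare 'except'
    pass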
diff --git a/tutorial.ipynb b/tutorial.ipynb
index 47c44251b5ab..115d767a70bf 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -1014,4 +1014,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
diff --git a/utils/datasets.py b/utils/datasets.py
index 3997a5df6331..fce005bd597c 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -140,7 +140,7 @@ def __iter__(self):
yield next(self.iterator)
-class _RepeatSampler(object):
+class _RepeatSampler:
""" Sampler that repeats forever
Args:
@@ -287,7 +287,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.stride = stride
if os.path.isfile(sources):
- with open(sources, 'r') as f:
+ with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
@@ -398,14 +398,14 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
- with open(p, 'r') as t:
+ with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
- self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
+ self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
@@ -681,7 +681,7 @@ def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
- yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
@@ -767,7 +767,7 @@ def load_mosaic9(self, index):
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
- x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
@@ -782,7 +782,7 @@ def load_mosaic9(self, index):
hp, wp = h, w # height, width previous
# Offset
- yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
@@ -838,7 +838,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *;
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
@@ -866,7 +866,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
- files = sorted([x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS]) # image files only
+ files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
@@ -902,7 +902,7 @@ def verify_image_label(args):
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
@@ -944,7 +944,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
- return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
+ return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
@@ -1019,7 +1019,7 @@ def hub_ops(f, max_dim=1920):
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
- with open(file, 'r') as f:
+ with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
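Two idioms from the datasets.py changes, sketched together: open() defaults to read mode, and a generator expression can be unpacked into multiple targets just like a list. Values below are illustrative:

import random

s, mosaic_border = 640, [-320, -320]  # assumed img_size 640
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in mosaic_border)  # two mosaic center coords
print(yc, xc)  # each in [320, 960]

with open(__file__) as f:  # mode 'r' is the default, so open(p, 'r') == open(p)
    first_line = f.readline()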
diff --git a/utils/general.py b/utils/general.py
index 02bc741ca3ba..f22908907fd0 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -136,7 +136,7 @@ def is_writeable(dir, test=False):
pass
file.unlink() # remove file
return True
- except IOError:
+ except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
@@ -355,7 +355,7 @@ def check_dataset(data, autodownload=True):
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
- train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')]
+ train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
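The except-clause change is safe because IOError has been an alias of OSError since Python 3.3; a quick check:

assert IOError is OSError  # alias since Python 3.3

try:
    open('/nonexistent/path')
except OSError as e:  # also catches anything raised as the legacy IOError
    print(type(e).__name__)  # FileNotFoundError, an OSError subclass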
diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml
index ac29d104b144..5056b7c1186d 100644
--- a/utils/google_app_engine/app.yaml
+++ b/utils/google_app_engine/app.yaml
@@ -11,4 +11,4 @@ manual_scaling:
resources:
cpu: 1
memory_gb: 4
- disk_size_gb: 20
\ No newline at end of file
+ disk_size_gb: 20
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 0b457df63c93..ae2d98bdc36d 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -135,7 +135,7 @@ def on_train_end(self, last, best, plots, epoch, results):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
- files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+ files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md
index dd7dc1e46d45..d787fb7a5a0e 100644
--- a/utils/loggers/wandb/README.md
+++ b/utils/loggers/wandb/README.md
@@ -61,10 +61,10 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage
Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data ..
-
+
![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
-
+
2: Train and Log Evaluation simultaneously
This is an extension of the previous section, but it'll also train after uploading the dataset. This also logs an evaluation Table.
Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,
@@ -72,31 +72,31 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage
Code $ python utils/logger/wandb/log_dataset.py --data .. --upload_data
-
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
-
+
3: Train using dataset artifact
- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
+ When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
can be used to train a model directly from the dataset artifact. This also logs evaluation
Usage
Code $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml
-
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
-
+
4: Save model checkpoints as artifacts
- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged
Usage
Code $ python train.py --save_period 1
-
+
![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
-
+
5: Resume runs from checkpoint artifacts.
@@ -105,28 +105,28 @@ Any run can be resumed using artifacts if the --resume argument sta
Usage
Code $ python train.py --resume wandb-artifact://{run_path}
-
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-
+
6: Resume runs from dataset artifact & checkpoint artifacts.
Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device
- The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or train from _wandb.yaml file and set --save_period
+ The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or train from _wandb.yaml file and set --save_period
Usage
Code $ python train.py --resume wandb-artifact://{run_path}
-
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-
+
Reports
W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
-
+
diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml
index c3727de82d4a..c7790d75f6b2 100644
--- a/utils/loggers/wandb/sweep.yaml
+++ b/utils/loggers/wandb/sweep.yaml
@@ -1,17 +1,17 @@
# Hyperparameters for training
-# To set range-
+# To set range-
# Provide min and max values as:
# parameter:
-#
+#
# min: scalar
# max: scalar
# OR
#
# Set a specific list of search space-
-# parameter:
+# parameter:
# values: [scalar1, scalar2, scalar3...]
-#
-# You can use grid, bayesian and hyperopt search strategy
+#
+# You can use grid, bayesian and hyperopt search strategy
# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
program: utils/loggers/wandb/sweep.py
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index 7fb76b05e987..8546ec6c63cb 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -5,6 +5,7 @@
import sys
from contextlib import contextmanager
from pathlib import Path
+from typing import Dict
import pkg_resources as pkg
import yaml
@@ -25,7 +26,7 @@
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
-
+
RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
@@ -127,7 +128,7 @@ def __init__(self, opt, run_id=None, job_type='Training'):
arguments:
opt (namespace) -- Commandline arguments for this run
run_id (str) -- Run ID of W&B run to be resumed
- job_type (str) -- To set the job_type for this run
+ job_type (str) -- To set the job_type for this run
"""
# Pre-training routine --
@@ -142,7 +143,8 @@ def __init__(self, opt, run_id=None, job_type='Training'):
self.max_imgs_to_log = 16
self.wandb_artifact_data_dict = None
self.data_dict = None
- # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
+ # It's more elegant to stick to 1 wandb.init call,
+ # but useful config data is overwritten in the WandbLogger's wandb.init call
if isinstance(opt.resume, str): # checks resume from artifact
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
@@ -212,7 +214,7 @@ def setup_training(self, opt):
Setup the necessary processes for training YOLO models:
- Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
- Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
- - Setup log_dict, initialize bbox_interval
+ - Setup log_dict, initialize bbox_interval
arguments:
opt (namespace) -- commandline arguments for this run
@@ -301,7 +303,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
path (Path) -- Path of directory containing the checkpoints
opt (namespace) -- Command line arguments for this run
epoch (int) -- Current epoch number
- fitness_score (float) -- fitness score for current epoch
+ fitness_score (float) -- fitness score for current epoch
best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
"""
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
@@ -325,7 +327,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
single_class (boolean) -- train multi-class data as single-class
project (str) -- project name. Used to construct the artifact path
- overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
+ overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
file with _wandb postfix. Eg -> data_wandb.yaml
returns:
@@ -371,14 +373,14 @@ def map_val_table_path(self):
for i, data in enumerate(tqdm(self.val_table.data)):
self.val_table_path_map[data[3]] = data[0]
- def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+ def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
"""
Create and return W&B artifact containing W&B Table of the dataset.
arguments:
- dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
- class_to_id (dict(int, str)) -- hash map that maps class ids to labels
- name (str) -- name of the artifact
+ dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
+ class_to_id -- hash map that maps class ids to labels
+ name -- name of the artifact
returns:
dataset artifact to be logged or used
@@ -419,7 +421,7 @@ def log_training_progress(self, predn, path, names):
arguments:
predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
- path (str): local path of the current evaluation image
+ path (str): local path of the current evaluation image
names (dict(int, str)): hash map that maps class ids to labels
"""
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
@@ -430,7 +432,7 @@ def log_training_progress(self, predn, path, names):
box_data.append(
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
+ "box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"})
total_conf += conf
@@ -450,7 +452,7 @@ def val_one_image(self, pred, predn, path, names, im):
arguments:
pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
- path (str): local path of the current evaluation image
+ path (str): local path of the current evaluation image
"""
if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
self.log_training_progress(predn, path, names)
@@ -459,7 +461,7 @@ def val_one_image(self, pred, predn, path, names, im):
if self.current_epoch % self.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
+ "box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
diff --git a/utils/loss.py b/utils/loss.py
index fac432d0edc3..e8ce42ad994a 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -18,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
- super(BCEBlurWithLogitsLoss, self).__init__()
+ super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
@@ -35,7 +35,7 @@ def forward(self, pred, true):
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(FocalLoss, self).__init__()
+ super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
@@ -65,7 +65,7 @@ def forward(self, pred, true):
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(QFocalLoss, self).__init__()
+ super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
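The loss-class edits are the same zero-argument super() rewrite: Python 3 infers the class and instance from the enclosing scope, so behaviour is unchanged. A minimal sketch with a hypothetical class name, assuming torch:

import torch.nn as nn

class DemoLoss(nn.Module):  # stand-in for the loss classes above
    def __init__(self):
        super().__init__()  # equivalent to: super(DemoLoss, self).__init__()

print(DemoLoss())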
diff --git a/utils/plots.py b/utils/plots.py
index 00b8f88811e2..00cda6d8d986 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
@@ -363,7 +363,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
else:
a.remove()
except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
+ print(f'Warning: Plotting error for {f}; {e}')
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
@@ -384,10 +384,10 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *;
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
+ plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
- print('%15s: %.3g' % (k, mu))
+ print(f'{k:>15}: {mu:.3g}')
f = evolve_csv.with_suffix('.png') # filename
plt.savefig(f, dpi=200)
plt.close()
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 6f52f9a3728d..e6d8ebd743bf 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -123,7 +123,7 @@ def profile(input, ops, n=10, device=None):
y = m(x)
t[1] = time_sync()
try:
- _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
+ _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception as e: # no backward method
# print(e) # for debug
@@ -223,7 +223,7 @@ def model_info(model, verbose=False, img_size=640):
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
- print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+ print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
@@ -270,7 +270,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
- h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+ h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean